2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_damage_helper.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_display_debugfs.h"
49 #include "display/intel_dp.h"
50 #include "display/intel_dp_mst.h"
51 #include "display/intel_dpll.h"
52 #include "display/intel_dpll_mgr.h"
53 #include "display/intel_dsi.h"
54 #include "display/intel_dvo.h"
55 #include "display/intel_gmbus.h"
56 #include "display/intel_hdmi.h"
57 #include "display/intel_lvds.h"
58 #include "display/intel_sdvo.h"
59 #include "display/intel_tv.h"
60 #include "display/intel_vdsc.h"
61 #include "display/intel_vrr.h"
63 #include "gem/i915_gem_object.h"
65 #include "gt/intel_rps.h"
68 #include "i915_trace.h"
69 #include "intel_acpi.h"
70 #include "intel_atomic.h"
71 #include "intel_atomic_plane.h"
73 #include "intel_cdclk.h"
74 #include "intel_color.h"
75 #include "intel_crtc.h"
76 #include "intel_csr.h"
77 #include "intel_display_types.h"
78 #include "intel_dp_link_training.h"
79 #include "intel_fbc.h"
80 #include "intel_fdi.h"
81 #include "intel_fbdev.h"
82 #include "intel_fifo_underrun.h"
83 #include "intel_frontbuffer.h"
84 #include "intel_hdcp.h"
85 #include "intel_hotplug.h"
86 #include "intel_overlay.h"
87 #include "intel_pipe_crc.h"
89 #include "intel_pps.h"
90 #include "intel_psr.h"
91 #include "intel_quirks.h"
92 #include "intel_sideband.h"
93 #include "intel_sprite.h"
95 #include "intel_vga.h"
96 #include "i9xx_plane.h"
97 #include "skl_scaler.h"
98 #include "skl_universal_plane.h"
100 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
101 struct intel_crtc_state *pipe_config);
102 static void ilk_pch_clock_get(struct intel_crtc *crtc,
103 struct intel_crtc_state *pipe_config);
105 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
106 struct drm_i915_gem_object *obj,
107 struct drm_mode_fb_cmd2 *mode_cmd);
108 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
109 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
110 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
111 const struct intel_link_m_n *m_n,
112 const struct intel_link_m_n *m2_n2);
113 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
114 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
115 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
116 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
117 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
118 static void intel_modeset_setup_hw_state(struct drm_device *dev,
119 struct drm_modeset_acquire_ctx *ctx);
121 /* returns HPLL frequency in kHz */
122 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
124 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
126 /* Obtain SKU information */
127 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
128 CCK_FUSE_HPLL_FREQ_MASK;
130 return vco_freq[hpll_freq] * 1000;
133 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
134 const char *name, u32 reg, int ref_freq)
139 val = vlv_cck_read(dev_priv, reg);
140 divider = val & CCK_FREQUENCY_VALUES;
142 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
143 (divider << CCK_FREQUENCY_STATUS_SHIFT),
144 "%s change in progress\n", name);
146 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
149 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
150 const char *name, u32 reg)
154 vlv_cck_get(dev_priv);
156 if (dev_priv->hpll_freq == 0)
157 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
159 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
161 vlv_cck_put(dev_priv);
166 static void intel_update_czclk(struct drm_i915_private *dev_priv)
168 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
171 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
172 CCK_CZ_CLOCK_CONTROL);
174 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
175 dev_priv->czclk_freq);
178 /* WA Display #0827: Gen9:all */
180 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
183 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
184 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
186 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
187 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
190 /* Wa_2006604312:icl,ehl */
192 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
196 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
197 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
199 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
200 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
204 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
206 return crtc_state->master_transcoder != INVALID_TRANSCODER;
210 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
212 return crtc_state->sync_mode_slaves_mask != 0;
216 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
218 return is_trans_port_sync_master(crtc_state) ||
219 is_trans_port_sync_slave(crtc_state);
222 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
225 i915_reg_t reg = PIPEDSL(pipe);
229 if (IS_GEN(dev_priv, 2))
230 line_mask = DSL_LINEMASK_GEN2;
232 line_mask = DSL_LINEMASK_GEN3;
234 line1 = intel_de_read(dev_priv, reg) & line_mask;
236 line2 = intel_de_read(dev_priv, reg) & line_mask;
238 return line1 != line2;
241 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
243 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
244 enum pipe pipe = crtc->pipe;
246 /* Wait for the display line to settle/start moving */
247 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
248 drm_err(&dev_priv->drm,
249 "pipe %c scanline %s wait timed out\n",
250 pipe_name(pipe), onoff(state));
253 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
255 wait_for_pipe_scanline_moving(crtc, false);
258 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
260 wait_for_pipe_scanline_moving(crtc, true);
264 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
266 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
267 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
269 if (INTEL_GEN(dev_priv) >= 4) {
270 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
271 i915_reg_t reg = PIPECONF(cpu_transcoder);
273 /* Wait for the Pipe State to go off */
274 if (intel_de_wait_for_clear(dev_priv, reg,
275 I965_PIPECONF_ACTIVE, 100))
276 drm_WARN(&dev_priv->drm, 1,
277 "pipe_off wait timed out\n");
279 intel_wait_for_pipe_scanline_stopped(crtc);
283 /* Only for pre-ILK configs */
284 void assert_pll(struct drm_i915_private *dev_priv,
285 enum pipe pipe, bool state)
290 val = intel_de_read(dev_priv, DPLL(pipe));
291 cur_state = !!(val & DPLL_VCO_ENABLE);
292 I915_STATE_WARN(cur_state != state,
293 "PLL state assertion failure (expected %s, current %s)\n",
294 onoff(state), onoff(cur_state));
297 /* XXX: the dsi pll is shared between MIPI DSI ports */
298 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
303 vlv_cck_get(dev_priv);
304 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
305 vlv_cck_put(dev_priv);
307 cur_state = val & DSI_PLL_VCO_EN;
308 I915_STATE_WARN(cur_state != state,
309 "DSI PLL state assertion failure (expected %s, current %s)\n",
310 onoff(state), onoff(cur_state));
313 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
314 enum pipe pipe, bool state)
318 if (HAS_DDI(dev_priv)) {
320 * DDI does not have a specific FDI_TX register.
322 * FDI is never fed from EDP transcoder
323 * so pipe->transcoder cast is fine here.
325 enum transcoder cpu_transcoder = (enum transcoder)pipe;
326 u32 val = intel_de_read(dev_priv,
327 TRANS_DDI_FUNC_CTL(cpu_transcoder));
328 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
330 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
331 cur_state = !!(val & FDI_TX_ENABLE);
333 I915_STATE_WARN(cur_state != state,
334 "FDI TX state assertion failure (expected %s, current %s)\n",
335 onoff(state), onoff(cur_state));
337 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
338 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
340 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
341 enum pipe pipe, bool state)
346 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
347 cur_state = !!(val & FDI_RX_ENABLE);
348 I915_STATE_WARN(cur_state != state,
349 "FDI RX state assertion failure (expected %s, current %s)\n",
350 onoff(state), onoff(cur_state));
352 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
353 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
355 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
360 /* ILK FDI PLL is always enabled */
361 if (IS_GEN(dev_priv, 5))
364 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
365 if (HAS_DDI(dev_priv))
368 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
369 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
372 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
373 enum pipe pipe, bool state)
378 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
379 cur_state = !!(val & FDI_RX_PLL_ENABLE);
380 I915_STATE_WARN(cur_state != state,
381 "FDI RX PLL assertion failure (expected %s, current %s)\n",
382 onoff(state), onoff(cur_state));
385 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
389 enum pipe panel_pipe = INVALID_PIPE;
392 if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
395 if (HAS_PCH_SPLIT(dev_priv)) {
398 pp_reg = PP_CONTROL(0);
399 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
402 case PANEL_PORT_SELECT_LVDS:
403 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
405 case PANEL_PORT_SELECT_DPA:
406 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
408 case PANEL_PORT_SELECT_DPC:
409 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
411 case PANEL_PORT_SELECT_DPD:
412 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
415 MISSING_CASE(port_sel);
418 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
419 /* presumably write lock depends on pipe, not port select */
420 pp_reg = PP_CONTROL(pipe);
425 pp_reg = PP_CONTROL(0);
426 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
428 drm_WARN_ON(&dev_priv->drm,
429 port_sel != PANEL_PORT_SELECT_LVDS);
430 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
433 val = intel_de_read(dev_priv, pp_reg);
434 if (!(val & PANEL_POWER_ON) ||
435 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
438 I915_STATE_WARN(panel_pipe == pipe && locked,
439 "panel assertion failure, pipe %c regs locked\n",
443 void assert_pipe(struct drm_i915_private *dev_priv,
444 enum transcoder cpu_transcoder, bool state)
447 enum intel_display_power_domain power_domain;
448 intel_wakeref_t wakeref;
450 /* we keep both pipes enabled on 830 */
451 if (IS_I830(dev_priv))
454 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
455 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
457 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
458 cur_state = !!(val & PIPECONF_ENABLE);
460 intel_display_power_put(dev_priv, power_domain, wakeref);
465 I915_STATE_WARN(cur_state != state,
466 "transcoder %s assertion failure (expected %s, current %s)\n",
467 transcoder_name(cpu_transcoder),
468 onoff(state), onoff(cur_state));
471 static void assert_plane(struct intel_plane *plane, bool state)
476 cur_state = plane->get_hw_state(plane, &pipe);
478 I915_STATE_WARN(cur_state != state,
479 "%s assertion failure (expected %s, current %s)\n",
480 plane->base.name, onoff(state), onoff(cur_state));
483 #define assert_plane_enabled(p) assert_plane(p, true)
484 #define assert_plane_disabled(p) assert_plane(p, false)
486 static void assert_planes_disabled(struct intel_crtc *crtc)
488 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
489 struct intel_plane *plane;
491 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
492 assert_plane_disabled(plane);
495 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
501 val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
502 enabled = !!(val & TRANS_ENABLE);
503 I915_STATE_WARN(enabled,
504 "transcoder assertion failed, should be off on pipe %c but is still active\n",
508 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
509 enum pipe pipe, enum port port,
515 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
517 I915_STATE_WARN(state && port_pipe == pipe,
518 "PCH DP %c enabled on transcoder %c, should be disabled\n",
519 port_name(port), pipe_name(pipe));
521 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
522 "IBX PCH DP %c still using transcoder B\n",
526 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
527 enum pipe pipe, enum port port,
533 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
535 I915_STATE_WARN(state && port_pipe == pipe,
536 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
537 port_name(port), pipe_name(pipe));
539 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
540 "IBX PCH HDMI %c still using transcoder B\n",
544 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
549 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
550 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
551 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
553 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
555 "PCH VGA enabled on transcoder %c, should be disabled\n",
558 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
560 "PCH LVDS enabled on transcoder %c, should be disabled\n",
563 /* PCH SDVOB multiplex with HDMIB */
564 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
565 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
566 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
569 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
570 struct intel_digital_port *dig_port,
571 unsigned int expected_mask)
576 switch (dig_port->base.port) {
578 port_mask = DPLL_PORTB_READY_MASK;
582 port_mask = DPLL_PORTC_READY_MASK;
587 port_mask = DPLL_PORTD_READY_MASK;
588 dpll_reg = DPIO_PHY_STATUS;
594 if (intel_de_wait_for_register(dev_priv, dpll_reg,
595 port_mask, expected_mask, 1000))
596 drm_WARN(&dev_priv->drm, 1,
597 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
598 dig_port->base.base.base.id, dig_port->base.base.name,
599 intel_de_read(dev_priv, dpll_reg) & port_mask,
603 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
605 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
606 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
607 enum pipe pipe = crtc->pipe;
609 u32 val, pipeconf_val;
611 /* Make sure PCH DPLL is enabled */
612 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
614 /* FDI must be feeding us bits for PCH ports */
615 assert_fdi_tx_enabled(dev_priv, pipe);
616 assert_fdi_rx_enabled(dev_priv, pipe);
618 if (HAS_PCH_CPT(dev_priv)) {
619 reg = TRANS_CHICKEN2(pipe);
620 val = intel_de_read(dev_priv, reg);
622 * Workaround: Set the timing override bit
623 * before enabling the pch transcoder.
625 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
626 /* Configure frame start delay to match the CPU */
627 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
628 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
629 intel_de_write(dev_priv, reg, val);
632 reg = PCH_TRANSCONF(pipe);
633 val = intel_de_read(dev_priv, reg);
634 pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
636 if (HAS_PCH_IBX(dev_priv)) {
637 /* Configure frame start delay to match the CPU */
638 val &= ~TRANS_FRAME_START_DELAY_MASK;
639 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
642 * Make the BPC in transcoder be consistent with
643 * that in pipeconf reg. For HDMI we must use 8bpc
644 * here for both 8bpc and 12bpc.
646 val &= ~PIPECONF_BPC_MASK;
647 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
648 val |= PIPECONF_8BPC;
650 val |= pipeconf_val & PIPECONF_BPC_MASK;
653 val &= ~TRANS_INTERLACE_MASK;
654 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
655 if (HAS_PCH_IBX(dev_priv) &&
656 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
657 val |= TRANS_LEGACY_INTERLACED_ILK;
659 val |= TRANS_INTERLACED;
661 val |= TRANS_PROGRESSIVE;
664 intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
665 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
666 drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
670 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
671 enum transcoder cpu_transcoder)
673 u32 val, pipeconf_val;
675 /* FDI must be feeding us bits for PCH ports */
676 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
677 assert_fdi_rx_enabled(dev_priv, PIPE_A);
679 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
680 /* Workaround: set timing override bit. */
681 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
682 /* Configure frame start delay to match the CPU */
683 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
684 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
685 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
688 pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
690 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
691 PIPECONF_INTERLACED_ILK)
692 val |= TRANS_INTERLACED;
694 val |= TRANS_PROGRESSIVE;
696 intel_de_write(dev_priv, LPT_TRANSCONF, val);
697 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
698 TRANS_STATE_ENABLE, 100))
699 drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
702 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
708 /* FDI relies on the transcoder */
709 assert_fdi_tx_disabled(dev_priv, pipe);
710 assert_fdi_rx_disabled(dev_priv, pipe);
712 /* Ports must be off as well */
713 assert_pch_ports_disabled(dev_priv, pipe);
715 reg = PCH_TRANSCONF(pipe);
716 val = intel_de_read(dev_priv, reg);
717 val &= ~TRANS_ENABLE;
718 intel_de_write(dev_priv, reg, val);
719 /* wait for PCH transcoder off, transcoder state */
720 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
721 drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
724 if (HAS_PCH_CPT(dev_priv)) {
725 /* Workaround: Clear the timing override chicken bit again. */
726 reg = TRANS_CHICKEN2(pipe);
727 val = intel_de_read(dev_priv, reg);
728 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
729 intel_de_write(dev_priv, reg, val);
733 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
737 val = intel_de_read(dev_priv, LPT_TRANSCONF);
738 val &= ~TRANS_ENABLE;
739 intel_de_write(dev_priv, LPT_TRANSCONF, val);
740 /* wait for PCH transcoder off, transcoder state */
741 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
742 TRANS_STATE_ENABLE, 50))
743 drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
745 /* Workaround: clear timing override bit. */
746 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
747 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
748 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
751 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
755 if (HAS_PCH_LPT(dev_priv))
761 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
763 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
764 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
765 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
766 enum pipe pipe = crtc->pipe;
770 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
772 assert_planes_disabled(crtc);
775 * A pipe without a PLL won't actually be able to drive bits from
776 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
779 if (HAS_GMCH(dev_priv)) {
780 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
781 assert_dsi_pll_enabled(dev_priv);
783 assert_pll_enabled(dev_priv, pipe);
785 if (new_crtc_state->has_pch_encoder) {
786 /* if driving the PCH, we need FDI enabled */
787 assert_fdi_rx_pll_enabled(dev_priv,
788 intel_crtc_pch_transcoder(crtc));
789 assert_fdi_tx_pll_enabled(dev_priv,
790 (enum pipe) cpu_transcoder);
792 /* FIXME: assert CPU port conditions for SNB+ */
795 trace_intel_pipe_enable(crtc);
797 reg = PIPECONF(cpu_transcoder);
798 val = intel_de_read(dev_priv, reg);
799 if (val & PIPECONF_ENABLE) {
800 /* we keep both pipes enabled on 830 */
801 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
805 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
806 intel_de_posting_read(dev_priv, reg);
809 * Until the pipe starts PIPEDSL reads will return a stale value,
810 * which causes an apparent vblank timestamp jump when PIPEDSL
811 * resets to its proper value. That also messes up the frame count
812 * when it's derived from the timestamps. So let's wait for the
813 * pipe to start properly before we call drm_crtc_vblank_on()
815 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
816 intel_wait_for_pipe_scanline_moving(crtc);
819 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
821 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
822 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
823 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
824 enum pipe pipe = crtc->pipe;
828 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
831 * Make sure planes won't keep trying to pump pixels to us,
832 * or we might hang the display.
834 assert_planes_disabled(crtc);
836 trace_intel_pipe_disable(crtc);
838 reg = PIPECONF(cpu_transcoder);
839 val = intel_de_read(dev_priv, reg);
840 if ((val & PIPECONF_ENABLE) == 0)
844 * Double wide has implications for planes
845 * so best keep it disabled when not needed.
847 if (old_crtc_state->double_wide)
848 val &= ~PIPECONF_DOUBLE_WIDE;
850 /* Don't disable pipe or pipe PLLs if needed */
851 if (!IS_I830(dev_priv))
852 val &= ~PIPECONF_ENABLE;
854 intel_de_write(dev_priv, reg, val);
855 if ((val & PIPECONF_ENABLE) == 0)
856 intel_wait_for_pipe_off(old_crtc_state);
859 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
861 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
864 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
866 if (is_ccs_modifier(fb->modifier))
867 return is_ccs_plane(fb, plane);
873 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
876 return info->is_yuv &&
877 info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
880 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
883 return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
888 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
890 struct drm_i915_private *dev_priv = to_i915(fb->dev);
891 unsigned int cpp = fb->format->cpp[color_plane];
893 switch (fb->modifier) {
894 case DRM_FORMAT_MOD_LINEAR:
895 return intel_tile_size(dev_priv);
896 case I915_FORMAT_MOD_X_TILED:
897 if (IS_GEN(dev_priv, 2))
901 case I915_FORMAT_MOD_Y_TILED_CCS:
902 if (is_ccs_plane(fb, color_plane))
905 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
906 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
907 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
908 if (is_ccs_plane(fb, color_plane))
911 case I915_FORMAT_MOD_Y_TILED:
912 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
916 case I915_FORMAT_MOD_Yf_TILED_CCS:
917 if (is_ccs_plane(fb, color_plane))
920 case I915_FORMAT_MOD_Yf_TILED:
936 MISSING_CASE(fb->modifier);
942 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
944 if (is_gen12_ccs_plane(fb, color_plane))
947 return intel_tile_size(to_i915(fb->dev)) /
948 intel_tile_width_bytes(fb, color_plane);
951 /* Return the tile dimensions in pixel units */
952 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
953 unsigned int *tile_width,
954 unsigned int *tile_height)
956 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
957 unsigned int cpp = fb->format->cpp[color_plane];
959 *tile_width = tile_width_bytes / cpp;
960 *tile_height = intel_tile_height(fb, color_plane);
963 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
966 unsigned int tile_width, tile_height;
968 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
970 return fb->pitches[color_plane] * tile_height;
974 intel_fb_align_height(const struct drm_framebuffer *fb,
975 int color_plane, unsigned int height)
977 unsigned int tile_height = intel_tile_height(fb, color_plane);
979 return ALIGN(height, tile_height);
982 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
984 unsigned int size = 0;
987 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
988 size += rot_info->plane[i].width * rot_info->plane[i].height;
993 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
995 unsigned int size = 0;
998 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
999 size += rem_info->plane[i].width * rem_info->plane[i].height;
1005 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1006 const struct drm_framebuffer *fb,
1007 unsigned int rotation)
1009 view->type = I915_GGTT_VIEW_NORMAL;
1010 if (drm_rotation_90_or_270(rotation)) {
1011 view->type = I915_GGTT_VIEW_ROTATED;
1012 view->rotated = to_intel_framebuffer(fb)->rot_info;
1016 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
1018 if (IS_I830(dev_priv))
1020 else if (IS_I85X(dev_priv))
1022 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
1028 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
1030 if (INTEL_GEN(dev_priv) >= 9)
1032 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
1033 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1035 else if (INTEL_GEN(dev_priv) >= 4)
1041 static bool has_async_flips(struct drm_i915_private *i915)
1043 return INTEL_GEN(i915) >= 5;
1046 unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
1049 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1051 /* AUX_DIST needs only 4K alignment */
1052 if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
1053 is_ccs_plane(fb, color_plane))
1056 switch (fb->modifier) {
1057 case DRM_FORMAT_MOD_LINEAR:
1058 return intel_linear_alignment(dev_priv);
1059 case I915_FORMAT_MOD_X_TILED:
1060 if (has_async_flips(dev_priv))
1063 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1064 if (is_semiplanar_uv_plane(fb, color_plane))
1065 return intel_tile_row_size(fb, color_plane);
1067 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1068 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1070 case I915_FORMAT_MOD_Y_TILED_CCS:
1071 case I915_FORMAT_MOD_Yf_TILED_CCS:
1072 case I915_FORMAT_MOD_Y_TILED:
1073 if (INTEL_GEN(dev_priv) >= 12 &&
1074 is_semiplanar_uv_plane(fb, color_plane))
1075 return intel_tile_row_size(fb, color_plane);
1077 case I915_FORMAT_MOD_Yf_TILED:
1078 return 1 * 1024 * 1024;
1080 MISSING_CASE(fb->modifier);
1085 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1087 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1088 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1090 return INTEL_GEN(dev_priv) < 4 ||
1092 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
1096 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1097 const struct i915_ggtt_view *view,
1099 unsigned long *out_flags)
1101 struct drm_device *dev = fb->dev;
1102 struct drm_i915_private *dev_priv = to_i915(dev);
1103 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1104 intel_wakeref_t wakeref;
1105 struct i915_vma *vma;
1106 unsigned int pinctl;
1109 if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1110 return ERR_PTR(-EINVAL);
1112 alignment = intel_surf_alignment(fb, 0);
1113 if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1114 return ERR_PTR(-EINVAL);
1116 /* Note that the w/a also requires 64 PTE of padding following the
1117 * bo. We currently fill all unused PTE with the shadow page and so
1118 * we should always have valid PTE following the scanout preventing
1121 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1122 alignment = 256 * 1024;
1125 * Global gtt pte registers are special registers which actually forward
1126 * writes to a chunk of system memory. Which means that there is no risk
1127 * that the register values disappear as soon as we call
1128 * intel_runtime_pm_put(), so it is correct to wrap only the
1129 * pin/unpin/fence and not more.
1131 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1133 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1136 * Valleyview is definitely limited to scanning out the first
1137 * 512MiB. Lets presume this behaviour was inherited from the
1138 * g4x display engine and that all earlier gen are similarly
1139 * limited. Testing suggests that it is a little more
1140 * complicated than this. For example, Cherryview appears quite
1141 * happy to scanout from anywhere within its global aperture.
1144 if (HAS_GMCH(dev_priv))
1145 pinctl |= PIN_MAPPABLE;
1147 vma = i915_gem_object_pin_to_display_plane(obj,
1148 alignment, view, pinctl);
1152 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1156 * Install a fence for tiled scan-out. Pre-i965 always needs a
1157 * fence, whereas 965+ only requires a fence if using
1158 * framebuffer compression. For simplicity, we always, when
1159 * possible, install a fence as the cost is not that onerous.
1161 * If we fail to fence the tiled scanout, then either the
1162 * modeset will reject the change (which is highly unlikely as
1163 * the affected systems, all but one, do not have unmappable
1164 * space) or we will not be able to enable full powersaving
1165 * techniques (also likely not to apply due to various limits
1166 * FBC and the like impose on the size of the buffer, which
1167 * presumably we violated anyway with this unmappable buffer).
1168 * Anyway, it is presumably better to stumble onwards with
1169 * something and try to run the system in a "less than optimal"
1170 * mode that matches the user configuration.
1172 ret = i915_vma_pin_fence(vma);
1173 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
1174 i915_gem_object_unpin_from_display_plane(vma);
1179 if (ret == 0 && vma->fence)
1180 *out_flags |= PLANE_HAS_FENCE;
1185 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1186 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1190 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1192 i915_gem_object_lock(vma->obj, NULL);
1193 if (flags & PLANE_HAS_FENCE)
1194 i915_vma_unpin_fence(vma);
1195 i915_gem_object_unpin_from_display_plane(vma);
1196 i915_gem_object_unlock(vma->obj);
1201 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
1202 unsigned int rotation)
1204 if (drm_rotation_90_or_270(rotation))
1205 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
1207 return fb->pitches[color_plane];
1211 * Convert the x/y offsets into a linear offset.
1212 * Only valid with 0/180 degree rotation, which is fine since linear
1213 * offset is only used with linear buffers on pre-hsw and tiled buffers
1214 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1216 u32 intel_fb_xy_to_linear(int x, int y,
1217 const struct intel_plane_state *state,
1220 const struct drm_framebuffer *fb = state->hw.fb;
1221 unsigned int cpp = fb->format->cpp[color_plane];
1222 unsigned int pitch = state->color_plane[color_plane].stride;
1224 return y * pitch + x * cpp;
1228 * Add the x/y offsets derived from fb->offsets[] to the user
1229 * specified plane src x/y offsets. The resulting x/y offsets
1230 * specify the start of scanout from the beginning of the gtt mapping.
1232 void intel_add_fb_offsets(int *x, int *y,
1233 const struct intel_plane_state *state,
1237 *x += state->color_plane[color_plane].x;
1238 *y += state->color_plane[color_plane].y;
1241 static u32 intel_adjust_tile_offset(int *x, int *y,
1242 unsigned int tile_width,
1243 unsigned int tile_height,
1244 unsigned int tile_size,
1245 unsigned int pitch_tiles,
1249 unsigned int pitch_pixels = pitch_tiles * tile_width;
1252 WARN_ON(old_offset & (tile_size - 1));
1253 WARN_ON(new_offset & (tile_size - 1));
1254 WARN_ON(new_offset > old_offset);
1256 tiles = (old_offset - new_offset) / tile_size;
1258 *y += tiles / pitch_tiles * tile_height;
1259 *x += tiles % pitch_tiles * tile_width;
1261 /* minimize x in case it got needlessly big */
1262 *y += *x / pitch_pixels * tile_height;
1268 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
1270 return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
1271 is_gen12_ccs_plane(fb, color_plane);
1274 static u32 intel_adjust_aligned_offset(int *x, int *y,
1275 const struct drm_framebuffer *fb,
1277 unsigned int rotation,
1279 u32 old_offset, u32 new_offset)
1281 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1282 unsigned int cpp = fb->format->cpp[color_plane];
1284 drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
1286 if (!is_surface_linear(fb, color_plane)) {
1287 unsigned int tile_size, tile_width, tile_height;
1288 unsigned int pitch_tiles;
1290 tile_size = intel_tile_size(dev_priv);
1291 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
1293 if (drm_rotation_90_or_270(rotation)) {
1294 pitch_tiles = pitch / tile_height;
1295 swap(tile_width, tile_height);
1297 pitch_tiles = pitch / (tile_width * cpp);
1300 intel_adjust_tile_offset(x, y, tile_width, tile_height,
1301 tile_size, pitch_tiles,
1302 old_offset, new_offset);
1304 old_offset += *y * pitch + *x * cpp;
1306 *y = (old_offset - new_offset) / pitch;
1307 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
1314 * Adjust the tile offset by moving the difference into
1317 u32 intel_plane_adjust_aligned_offset(int *x, int *y,
1318 const struct intel_plane_state *state,
1320 u32 old_offset, u32 new_offset)
1322 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
1324 state->color_plane[color_plane].stride,
1325 old_offset, new_offset);
1329 * Computes the aligned offset to the base tile and adjusts
1330 * x, y. bytes per pixel is assumed to be a power-of-two.
1332 * In the 90/270 rotated case, x and y are assumed
1333 * to be already rotated to match the rotated GTT view, and
1334 * pitch is the tile_height aligned framebuffer height.
1336 * This function is used when computing the derived information
1337 * under intel_framebuffer, so using any of that information
1338 * here is not allowed. Anything under drm_framebuffer can be
1339 * used. This is why the user has to pass in the pitch since it
1340 * is specified in the rotated orientation.
1342 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
1344 const struct drm_framebuffer *fb,
1347 unsigned int rotation,
1350 unsigned int cpp = fb->format->cpp[color_plane];
1351 u32 offset, offset_aligned;
1353 if (!is_surface_linear(fb, color_plane)) {
1354 unsigned int tile_size, tile_width, tile_height;
1355 unsigned int tile_rows, tiles, pitch_tiles;
1357 tile_size = intel_tile_size(dev_priv);
1358 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
1360 if (drm_rotation_90_or_270(rotation)) {
1361 pitch_tiles = pitch / tile_height;
1362 swap(tile_width, tile_height);
1364 pitch_tiles = pitch / (tile_width * cpp);
1367 tile_rows = *y / tile_height;
1370 tiles = *x / tile_width;
1373 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
1375 offset_aligned = offset;
1377 offset_aligned = rounddown(offset_aligned, alignment);
1379 intel_adjust_tile_offset(x, y, tile_width, tile_height,
1380 tile_size, pitch_tiles,
1381 offset, offset_aligned);
1383 offset = *y * pitch + *x * cpp;
1384 offset_aligned = offset;
1386 offset_aligned = rounddown(offset_aligned, alignment);
1387 *y = (offset % alignment) / pitch;
1388 *x = ((offset % alignment) - *y * pitch) / cpp;
1394 return offset_aligned;
1397 u32 intel_plane_compute_aligned_offset(int *x, int *y,
1398 const struct intel_plane_state *state,
1401 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
1402 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
1403 const struct drm_framebuffer *fb = state->hw.fb;
1404 unsigned int rotation = state->hw.rotation;
1405 int pitch = state->color_plane[color_plane].stride;
1408 if (intel_plane->id == PLANE_CURSOR)
1409 alignment = intel_cursor_alignment(dev_priv);
1411 alignment = intel_surf_alignment(fb, color_plane);
1413 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
1414 pitch, rotation, alignment);
1417 /* Convert the fb->offset[] into x/y offsets */
1418 static int intel_fb_offset_to_xy(int *x, int *y,
1419 const struct drm_framebuffer *fb,
1422 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1423 unsigned int height;
1426 if (INTEL_GEN(dev_priv) >= 12 &&
1427 is_semiplanar_uv_plane(fb, color_plane))
1428 alignment = intel_tile_row_size(fb, color_plane);
1429 else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
1430 alignment = intel_tile_size(dev_priv);
1434 if (alignment != 0 && fb->offsets[color_plane] % alignment) {
1435 drm_dbg_kms(&dev_priv->drm,
1436 "Misaligned offset 0x%08x for color plane %d\n",
1437 fb->offsets[color_plane], color_plane);
1441 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
1442 height = ALIGN(height, intel_tile_height(fb, color_plane));
1444 /* Catch potential overflows early */
1445 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
1446 fb->offsets[color_plane])) {
1447 drm_dbg_kms(&dev_priv->drm,
1448 "Bad offset 0x%08x or pitch %d for color plane %d\n",
1449 fb->offsets[color_plane], fb->pitches[color_plane],
1457 intel_adjust_aligned_offset(x, y,
1458 fb, color_plane, DRM_MODE_ROTATE_0,
1459 fb->pitches[color_plane],
1460 fb->offsets[color_plane], 0);
1465 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1467 switch (fb_modifier) {
1468 case I915_FORMAT_MOD_X_TILED:
1469 return I915_TILING_X;
1470 case I915_FORMAT_MOD_Y_TILED:
1471 case I915_FORMAT_MOD_Y_TILED_CCS:
1472 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1473 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1474 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1475 return I915_TILING_Y;
1477 return I915_TILING_NONE;
1482 * From the Sky Lake PRM:
1483 * "The Color Control Surface (CCS) contains the compression status of
1484 * the cache-line pairs. The compression state of the cache-line pair
1485 * is specified by 2 bits in the CCS. Each CCS cache-line represents
1486 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1487 * cache-line-pairs. CCS is always Y tiled."
1489 * Since cache line pairs refers to horizontally adjacent cache lines,
1490 * each cache line in the CCS corresponds to an area of 32x16 cache
1491 * lines on the main surface. Since each pixel is 4 bytes, this gives
1492 * us a ratio of one byte in the CCS for each 8x16 pixels in the
1495 static const struct drm_format_info skl_ccs_formats[] = {
1496 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1497 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1498 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1499 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1500 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1501 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1502 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1503 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1507 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1508 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1509 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1510 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1513 static const struct drm_format_info gen12_ccs_formats[] = {
1514 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1515 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1516 .hsub = 1, .vsub = 1, },
1517 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1518 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1519 .hsub = 1, .vsub = 1, },
1520 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1521 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1522 .hsub = 1, .vsub = 1, .has_alpha = true },
1523 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1524 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1525 .hsub = 1, .vsub = 1, .has_alpha = true },
1526 { .format = DRM_FORMAT_YUYV, .num_planes = 2,
1527 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1528 .hsub = 2, .vsub = 1, .is_yuv = true },
1529 { .format = DRM_FORMAT_YVYU, .num_planes = 2,
1530 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1531 .hsub = 2, .vsub = 1, .is_yuv = true },
1532 { .format = DRM_FORMAT_UYVY, .num_planes = 2,
1533 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1534 .hsub = 2, .vsub = 1, .is_yuv = true },
1535 { .format = DRM_FORMAT_VYUY, .num_planes = 2,
1536 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1537 .hsub = 2, .vsub = 1, .is_yuv = true },
1538 { .format = DRM_FORMAT_NV12, .num_planes = 4,
1539 .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1540 .hsub = 2, .vsub = 2, .is_yuv = true },
1541 { .format = DRM_FORMAT_P010, .num_planes = 4,
1542 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1543 .hsub = 2, .vsub = 2, .is_yuv = true },
1544 { .format = DRM_FORMAT_P012, .num_planes = 4,
1545 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1546 .hsub = 2, .vsub = 2, .is_yuv = true },
1547 { .format = DRM_FORMAT_P016, .num_planes = 4,
1548 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1549 .hsub = 2, .vsub = 2, .is_yuv = true },
1553 * Same as gen12_ccs_formats[] above, but with additional surface used
1554 * to pass Clear Color information in plane 2 with 64 bits of data.
1556 static const struct drm_format_info gen12_ccs_cc_formats[] = {
1557 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
1558 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1559 .hsub = 1, .vsub = 1, },
1560 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
1561 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1562 .hsub = 1, .vsub = 1, },
1563 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
1564 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1565 .hsub = 1, .vsub = 1, .has_alpha = true },
1566 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
1567 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1568 .hsub = 1, .vsub = 1, .has_alpha = true },
1571 static const struct drm_format_info *
1572 lookup_format_info(const struct drm_format_info formats[],
1573 int num_formats, u32 format)
1577 for (i = 0; i < num_formats; i++) {
1578 if (formats[i].format == format)
1585 static const struct drm_format_info *
1586 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1588 switch (cmd->modifier[0]) {
1589 case I915_FORMAT_MOD_Y_TILED_CCS:
1590 case I915_FORMAT_MOD_Yf_TILED_CCS:
1591 return lookup_format_info(skl_ccs_formats,
1592 ARRAY_SIZE(skl_ccs_formats),
1594 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1595 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1596 return lookup_format_info(gen12_ccs_formats,
1597 ARRAY_SIZE(gen12_ccs_formats),
1599 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1600 return lookup_format_info(gen12_ccs_cc_formats,
1601 ARRAY_SIZE(gen12_ccs_cc_formats),
1608 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1610 return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1614 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1615 u32 pixel_format, u64 modifier)
1617 struct intel_crtc *crtc;
1618 struct intel_plane *plane;
1621 * We assume the primary plane for pipe A has
1622 * the highest stride limits of them all,
1623 * if in case pipe A is disabled, use the first pipe from pipe_mask.
1625 crtc = intel_get_first_crtc(dev_priv);
1629 plane = to_intel_plane(crtc->base.primary);
1631 return plane->max_stride(plane, pixel_format, modifier,
1636 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1637 u32 pixel_format, u64 modifier)
1640 * Arbitrary limit for gen4+ chosen to match the
1641 * render engine max stride.
1643 * The new CCS hash mode makes remapping impossible
1645 if (!is_ccs_modifier(modifier)) {
1646 if (INTEL_GEN(dev_priv) >= 7)
1648 else if (INTEL_GEN(dev_priv) >= 4)
1652 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1656 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1658 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1661 if (is_surface_linear(fb, color_plane)) {
1662 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1667 * To make remapping with linear generally feasible
1668 * we need the stride to be page aligned.
1670 if (fb->pitches[color_plane] > max_stride &&
1671 !is_ccs_modifier(fb->modifier))
1672 return intel_tile_size(dev_priv);
1677 tile_width = intel_tile_width_bytes(fb, color_plane);
1678 if (is_ccs_modifier(fb->modifier)) {
1680 * Display WA #0531: skl,bxt,kbl,glk
1682 * Render decompression and plane width > 3840
1683 * combined with horizontal panning requires the
1684 * plane stride to be a multiple of 4. We'll just
1685 * require the entire fb to accommodate that to avoid
1686 * potential runtime errors at plane configuration time.
1688 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
1691 * The main surface pitch must be padded to a multiple of four
1694 else if (INTEL_GEN(dev_priv) >= 12)
1700 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
1702 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1703 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1704 const struct drm_framebuffer *fb = plane_state->hw.fb;
1707 /* We don't want to deal with remapping with cursors */
1708 if (plane->id == PLANE_CURSOR)
1712 * The display engine limits already match/exceed the
1713 * render engine limits, so not much point in remapping.
1714 * Would also need to deal with the fence POT alignment
1715 * and gen2 2KiB GTT tile size.
1717 if (INTEL_GEN(dev_priv) < 4)
1721 * The new CCS hash mode isn't compatible with remapping as
1722 * the virtual address of the pages affects the compressed data.
1724 if (is_ccs_modifier(fb->modifier))
1727 /* Linear needs a page aligned stride for remapping */
1728 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
1729 unsigned int alignment = intel_tile_size(dev_priv) - 1;
1731 for (i = 0; i < fb->format->num_planes; i++) {
1732 if (fb->pitches[i] & alignment)
1740 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
1742 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1743 const struct drm_framebuffer *fb = plane_state->hw.fb;
1744 unsigned int rotation = plane_state->hw.rotation;
1745 u32 stride, max_stride;
1748 * No remapping for invisible planes since we don't have
1749 * an actual source viewport to remap.
1751 if (!plane_state->uapi.visible)
1754 if (!intel_plane_can_remap(plane_state))
1758 * FIXME: aux plane limits on gen9+ are
1759 * unclear in Bspec, for now no checking.
1761 stride = intel_fb_pitch(fb, 0, rotation);
1762 max_stride = plane->max_stride(plane, fb->format->format,
1763 fb->modifier, rotation);
1765 return stride > max_stride;
1769 intel_fb_plane_get_subsampling(int *hsub, int *vsub,
1770 const struct drm_framebuffer *fb,
1775 if (color_plane == 0) {
1783 * TODO: Deduct the subsampling from the char block for all CCS
1784 * formats and planes.
1786 if (!is_gen12_ccs_plane(fb, color_plane)) {
1787 *hsub = fb->format->hsub;
1788 *vsub = fb->format->vsub;
1793 main_plane = skl_ccs_to_main_plane(fb, color_plane);
1794 *hsub = drm_format_info_block_width(fb->format, color_plane) /
1795 drm_format_info_block_width(fb->format, main_plane);
1798 * The min stride check in the core framebuffer_check() function
1799 * assumes that format->hsub applies to every plane except for the
1800 * first plane. That's incorrect for the CCS AUX plane of the first
1801 * plane, but for the above check to pass we must define the block
1802 * width with that subsampling applied to it. Adjust the width here
1803 * accordingly, so we can calculate the actual subsampling factor.
1805 if (main_plane == 0)
1806 *hsub *= fb->format->hsub;
1811 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
1813 struct drm_i915_private *i915 = to_i915(fb->dev);
1814 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1817 int tile_width, tile_height;
1821 if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
1824 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
1825 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
1828 tile_height *= vsub;
1830 ccs_x = (x * hsub) % tile_width;
1831 ccs_y = (y * vsub) % tile_height;
1833 main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
1834 main_x = intel_fb->normal[main_plane].x % tile_width;
1835 main_y = intel_fb->normal[main_plane].y % tile_height;
1838 * CCS doesn't have its own x/y offset register, so the intra CCS tile
1839 * x/y offsets must match between CCS and the main surface.
1841 if (main_x != ccs_x || main_y != ccs_y) {
1842 drm_dbg_kms(&i915->drm,
1843 "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
1846 intel_fb->normal[main_plane].x,
1847 intel_fb->normal[main_plane].y,
1856 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
1858 int main_plane = is_ccs_plane(fb, color_plane) ?
1859 skl_ccs_to_main_plane(fb, color_plane) : 0;
1860 int main_hsub, main_vsub;
1863 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
1864 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
1865 *w = fb->width / main_hsub / hsub;
1866 *h = fb->height / main_vsub / vsub;
1870 * Setup the rotated view for an FB plane and return the size the GTT mapping
1871 * requires for this view.
1874 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
1875 u32 gtt_offset_rotated, int x, int y,
1876 unsigned int width, unsigned int height,
1877 unsigned int tile_size,
1878 unsigned int tile_width, unsigned int tile_height,
1879 struct drm_framebuffer *fb)
1881 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1882 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
1883 unsigned int pitch_tiles;
1886 /* Y or Yf modifiers required for 90/270 rotation */
1887 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
1888 fb->modifier != I915_FORMAT_MOD_Yf_TILED)
1891 if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
1894 rot_info->plane[plane] = *plane_info;
1896 intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
1898 /* rotate the x/y offsets to match the GTT view */
1899 drm_rect_init(&r, x, y, width, height);
1901 plane_info->width * tile_width,
1902 plane_info->height * tile_height,
1903 DRM_MODE_ROTATE_270);
1907 /* rotate the tile dimensions to match the GTT view */
1908 pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
1909 swap(tile_width, tile_height);
1912 * We only keep the x/y offsets, so push all of the
1913 * gtt offset into the x/y offsets.
1915 intel_adjust_tile_offset(&x, &y,
1916 tile_width, tile_height,
1917 tile_size, pitch_tiles,
1918 gtt_offset_rotated * tile_size, 0);
1921 * First pixel of the framebuffer from
1922 * the start of the rotated gtt mapping.
1924 intel_fb->rotated[plane].x = x;
1925 intel_fb->rotated[plane].y = y;
1927 return plane_info->width * plane_info->height;
1931 intel_fill_fb_info(struct drm_i915_private *dev_priv,
1932 struct drm_framebuffer *fb)
1934 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1935 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1936 u32 gtt_offset_rotated = 0;
1937 unsigned int max_size = 0;
1938 int i, num_planes = fb->format->num_planes;
1939 unsigned int tile_size = intel_tile_size(dev_priv);
1941 for (i = 0; i < num_planes; i++) {
1942 unsigned int width, height;
1943 unsigned int cpp, size;
1949 * Plane 2 of Render Compression with Clear Color fb modifier
1950 * is consumed by the driver and not passed to DE. Skip the
1951 * arithmetic related to alignment and offset calculation.
1953 if (is_gen12_ccs_cc_plane(fb, i)) {
1954 if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
1960 cpp = fb->format->cpp[i];
1961 intel_fb_plane_dims(&width, &height, fb, i);
1963 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
1965 drm_dbg_kms(&dev_priv->drm,
1966 "bad fb plane %d offset: 0x%x\n",
1971 ret = intel_fb_check_ccs_xy(fb, i, x, y);
1976 * The fence (if used) is aligned to the start of the object
1977 * so having the framebuffer wrap around across the edge of the
1978 * fenced region doesn't really work. We have no API to configure
1979 * the fence start offset within the object (nor could we probably
1980 * on gen2/3). So it's just easier if we just require that the
1981 * fb layout agrees with the fence layout. We already check that the
1982 * fb stride matches the fence stride elsewhere.
1984 if (i == 0 && i915_gem_object_is_tiled(obj) &&
1985 (x + width) * cpp > fb->pitches[i]) {
1986 drm_dbg_kms(&dev_priv->drm,
1987 "bad fb plane %d offset: 0x%x\n",
1993 * First pixel of the framebuffer from
1994 * the start of the normal gtt mapping.
1996 intel_fb->normal[i].x = x;
1997 intel_fb->normal[i].y = y;
1999 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2003 offset /= tile_size;
2005 if (!is_surface_linear(fb, i)) {
2006 struct intel_remapped_plane_info plane_info;
2007 unsigned int tile_width, tile_height;
2009 intel_tile_dims(fb, i, &tile_width, &tile_height);
2011 plane_info.offset = offset;
2012 plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
2014 plane_info.width = DIV_ROUND_UP(x + width, tile_width);
2015 plane_info.height = DIV_ROUND_UP(y + height,
2018 /* how many tiles does this plane need */
2019 size = plane_info.stride * plane_info.height;
2021 * If the plane isn't horizontally tile aligned,
2022 * we need one more tile.
2027 gtt_offset_rotated +=
2028 setup_fb_rotation(i, &plane_info,
2030 x, y, width, height,
2032 tile_width, tile_height,
2035 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2036 x * cpp, tile_size);
2039 /* how many tiles in total needed in the bo */
2040 max_size = max(max_size, offset + size);
2043 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2044 drm_dbg_kms(&dev_priv->drm,
2045 "fb too big for bo (need %llu bytes, have %zu bytes)\n",
2046 mul_u32_u32(max_size, tile_size), obj->base.size);
2054 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2056 struct drm_i915_private *dev_priv =
2057 to_i915(plane_state->uapi.plane->dev);
2058 struct drm_framebuffer *fb = plane_state->hw.fb;
2059 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2060 struct intel_rotation_info *info = &plane_state->view.rotated;
2061 unsigned int rotation = plane_state->hw.rotation;
2062 int i, num_planes = fb->format->num_planes;
2063 unsigned int tile_size = intel_tile_size(dev_priv);
2064 unsigned int src_x, src_y;
2065 unsigned int src_w, src_h;
2068 memset(&plane_state->view, 0, sizeof(plane_state->view));
2069 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2070 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2072 src_x = plane_state->uapi.src.x1 >> 16;
2073 src_y = plane_state->uapi.src.y1 >> 16;
2074 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
2075 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
2077 drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
2079 /* Make src coordinates relative to the viewport */
2080 drm_rect_translate(&plane_state->uapi.src,
2081 -(src_x << 16), -(src_y << 16));
2083 /* Rotate src coordinates to match rotated GTT view */
2084 if (drm_rotation_90_or_270(rotation))
2085 drm_rect_rotate(&plane_state->uapi.src,
2086 src_w << 16, src_h << 16,
2087 DRM_MODE_ROTATE_270);
2089 for (i = 0; i < num_planes; i++) {
2090 unsigned int hsub = i ? fb->format->hsub : 1;
2091 unsigned int vsub = i ? fb->format->vsub : 1;
2092 unsigned int cpp = fb->format->cpp[i];
2093 unsigned int tile_width, tile_height;
2094 unsigned int width, height;
2095 unsigned int pitch_tiles;
2099 intel_tile_dims(fb, i, &tile_width, &tile_height);
2103 width = src_w / hsub;
2104 height = src_h / vsub;
2107 * First pixel of the src viewport from the
2108 * start of the normal gtt mapping.
2110 x += intel_fb->normal[i].x;
2111 y += intel_fb->normal[i].y;
2113 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2114 fb, i, fb->pitches[i],
2115 DRM_MODE_ROTATE_0, tile_size);
2116 offset /= tile_size;
2118 drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
2119 info->plane[i].offset = offset;
2120 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2122 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2123 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2125 if (drm_rotation_90_or_270(rotation)) {
2128 /* rotate the x/y offsets to match the GTT view */
2129 drm_rect_init(&r, x, y, width, height);
2131 info->plane[i].width * tile_width,
2132 info->plane[i].height * tile_height,
2133 DRM_MODE_ROTATE_270);
2137 pitch_tiles = info->plane[i].height;
2138 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2140 /* rotate the tile dimensions to match the GTT view */
2141 swap(tile_width, tile_height);
2143 pitch_tiles = info->plane[i].width;
2144 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2148 * We only keep the x/y offsets, so push all of the
2149 * gtt offset into the x/y offsets.
2151 intel_adjust_tile_offset(&x, &y,
2152 tile_width, tile_height,
2153 tile_size, pitch_tiles,
2154 gtt_offset * tile_size, 0);
2156 gtt_offset += info->plane[i].width * info->plane[i].height;
2158 plane_state->color_plane[i].offset = 0;
2159 plane_state->color_plane[i].x = x;
2160 plane_state->color_plane[i].y = y;
2165 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2167 const struct intel_framebuffer *fb =
2168 to_intel_framebuffer(plane_state->hw.fb);
2169 unsigned int rotation = plane_state->hw.rotation;
2175 num_planes = fb->base.format->num_planes;
2177 if (intel_plane_needs_remap(plane_state)) {
2178 intel_plane_remap_gtt(plane_state);
2181 * Sometimes even remapping can't overcome
2182 * the stride limitations :( Can happen with
2183 * big plane sizes and suitably misaligned
2186 return intel_plane_check_stride(plane_state);
2189 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2191 for (i = 0; i < num_planes; i++) {
2192 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2193 plane_state->color_plane[i].offset = 0;
2195 if (drm_rotation_90_or_270(rotation)) {
2196 plane_state->color_plane[i].x = fb->rotated[i].x;
2197 plane_state->color_plane[i].y = fb->rotated[i].y;
2199 plane_state->color_plane[i].x = fb->normal[i].x;
2200 plane_state->color_plane[i].y = fb->normal[i].y;
2204 /* Rotate src coordinates to match rotated GTT view */
2205 if (drm_rotation_90_or_270(rotation))
2206 drm_rect_rotate(&plane_state->uapi.src,
2207 fb->base.width << 16, fb->base.height << 16,
2208 DRM_MODE_ROTATE_270);
2210 return intel_plane_check_stride(plane_state);
2213 static struct i915_vma *
2214 initial_plane_vma(struct drm_i915_private *i915,
2215 struct intel_initial_plane_config *plane_config)
2217 struct drm_i915_gem_object *obj;
2218 struct i915_vma *vma;
2221 if (plane_config->size == 0)
2224 base = round_down(plane_config->base,
2225 I915_GTT_MIN_ALIGNMENT);
2226 size = round_up(plane_config->base + plane_config->size,
2227 I915_GTT_MIN_ALIGNMENT);
2231 * If the FB is too big, just don't use it since fbdev is not very
2232 * important and we should probably use that space with FBC or other
2235 if (size * 2 > i915->stolen_usable_size)
2238 obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
2243 * Mark it WT ahead of time to avoid changing the
2244 * cache_level during fbdev initialization. The
2245 * unbind there would get stuck waiting for rcu.
2247 i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
2248 I915_CACHE_WT : I915_CACHE_NONE);
2250 switch (plane_config->tiling) {
2251 case I915_TILING_NONE:
2255 obj->tiling_and_stride =
2256 plane_config->fb->base.pitches[0] |
2257 plane_config->tiling;
2260 MISSING_CASE(plane_config->tiling);
2264 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
2268 if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
2271 if (i915_gem_object_is_tiled(obj) &&
2272 !i915_vma_is_map_and_fenceable(vma))
2278 i915_gem_object_put(obj);
2283 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2284 struct intel_initial_plane_config *plane_config)
2286 struct drm_device *dev = crtc->base.dev;
2287 struct drm_i915_private *dev_priv = to_i915(dev);
2288 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2289 struct drm_framebuffer *fb = &plane_config->fb->base;
2290 struct i915_vma *vma;
2292 switch (fb->modifier) {
2293 case DRM_FORMAT_MOD_LINEAR:
2294 case I915_FORMAT_MOD_X_TILED:
2295 case I915_FORMAT_MOD_Y_TILED:
2298 drm_dbg(&dev_priv->drm,
2299 "Unsupported modifier for initial FB: 0x%llx\n",
2304 vma = initial_plane_vma(dev_priv, plane_config);
2308 mode_cmd.pixel_format = fb->format->format;
2309 mode_cmd.width = fb->width;
2310 mode_cmd.height = fb->height;
2311 mode_cmd.pitches[0] = fb->pitches[0];
2312 mode_cmd.modifier[0] = fb->modifier;
2313 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2315 if (intel_framebuffer_init(to_intel_framebuffer(fb),
2316 vma->obj, &mode_cmd)) {
2317 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
2321 plane_config->vma = vma;
2330 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2331 struct intel_plane_state *plane_state,
2334 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2336 plane_state->uapi.visible = visible;
2339 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
2341 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
2344 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
2346 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2347 struct drm_plane *plane;
2350 * Active_planes aliases if multiple "primary" or cursor planes
2351 * have been used on the same (or wrong) pipe. plane_mask uses
2352 * unique ids, hence we can use that to reconstruct active_planes.
2354 crtc_state->enabled_planes = 0;
2355 crtc_state->active_planes = 0;
2357 drm_for_each_plane_mask(plane, &dev_priv->drm,
2358 crtc_state->uapi.plane_mask) {
2359 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
2360 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2364 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2365 struct intel_plane *plane)
2367 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2368 struct intel_crtc_state *crtc_state =
2369 to_intel_crtc_state(crtc->base.state);
2370 struct intel_plane_state *plane_state =
2371 to_intel_plane_state(plane->base.state);
2373 drm_dbg_kms(&dev_priv->drm,
2374 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2375 plane->base.base.id, plane->base.name,
2376 crtc->base.base.id, crtc->base.name);
2378 intel_set_plane_visible(crtc_state, plane_state, false);
2379 fixup_plane_bitmasks(crtc_state);
2380 crtc_state->data_rate[plane->id] = 0;
2381 crtc_state->min_cdclk[plane->id] = 0;
2383 if (plane->id == PLANE_PRIMARY)
2384 hsw_disable_ips(crtc_state);
2387 * Vblank time updates from the shadow to live plane control register
2388 * are blocked if the memory self-refresh mode is active at that
2389 * moment. So to make sure the plane gets truly disabled, disable
2390 * first the self-refresh mode. The self-refresh enable bit in turn
2391 * will be checked/applied by the HW only at the next frame start
2392 * event which is after the vblank start event, so we need to have a
2393 * wait-for-vblank between disabling the plane and the pipe.
2395 if (HAS_GMCH(dev_priv) &&
2396 intel_set_memory_cxsr(dev_priv, false))
2397 intel_wait_for_vblank(dev_priv, crtc->pipe);
2400 * Gen2 reports pipe underruns whenever all planes are disabled.
2401 * So disable underrun reporting before all the planes get disabled.
2403 if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
2404 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
2406 intel_disable_plane(plane, crtc_state);
2410 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2411 struct intel_initial_plane_config *plane_config)
2413 struct drm_device *dev = intel_crtc->base.dev;
2414 struct drm_i915_private *dev_priv = to_i915(dev);
2416 struct drm_plane *primary = intel_crtc->base.primary;
2417 struct drm_plane_state *plane_state = primary->state;
2418 struct intel_plane *intel_plane = to_intel_plane(primary);
2419 struct intel_plane_state *intel_state =
2420 to_intel_plane_state(plane_state);
2421 struct intel_crtc_state *crtc_state =
2422 to_intel_crtc_state(intel_crtc->base.state);
2423 struct drm_framebuffer *fb;
2424 struct i915_vma *vma;
2426 if (!plane_config->fb)
2429 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2430 fb = &plane_config->fb->base;
2431 vma = plane_config->vma;
2436 * Failed to alloc the obj, check to see if we should share
2437 * an fb with another CRTC instead
2439 for_each_crtc(dev, c) {
2440 struct intel_plane_state *state;
2442 if (c == &intel_crtc->base)
2445 if (!to_intel_crtc_state(c->state)->uapi.active)
2448 state = to_intel_plane_state(c->primary->state);
2452 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2460 * We've failed to reconstruct the BIOS FB. Current display state
2461 * indicates that the primary plane is visible, but has a NULL FB,
2462 * which will lead to problems later if we don't fix it up. The
2463 * simplest solution is to just disable the primary plane now and
2464 * pretend the BIOS never had it enabled.
2466 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2467 if (crtc_state->bigjoiner) {
2468 struct intel_crtc *slave =
2469 crtc_state->bigjoiner_linked_crtc;
2470 intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
2476 intel_state->hw.rotation = plane_config->rotation;
2477 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2478 intel_state->hw.rotation);
2479 intel_state->color_plane[0].stride =
2480 intel_fb_pitch(fb, 0, intel_state->hw.rotation);
2482 __i915_vma_pin(vma);
2483 intel_state->vma = i915_vma_get(vma);
2484 if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
2486 intel_state->flags |= PLANE_HAS_FENCE;
2488 plane_state->src_x = 0;
2489 plane_state->src_y = 0;
2490 plane_state->src_w = fb->width << 16;
2491 plane_state->src_h = fb->height << 16;
2493 plane_state->crtc_x = 0;
2494 plane_state->crtc_y = 0;
2495 plane_state->crtc_w = fb->width;
2496 plane_state->crtc_h = fb->height;
2498 intel_state->uapi.src = drm_plane_state_src(plane_state);
2499 intel_state->uapi.dst = drm_plane_state_dest(plane_state);
2501 if (plane_config->tiling)
2502 dev_priv->preserve_bios_swizzle = true;
2504 plane_state->fb = fb;
2505 drm_framebuffer_get(fb);
2507 plane_state->crtc = &intel_crtc->base;
2508 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
2511 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
2513 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2514 &to_intel_frontbuffer(fb)->bits);
2518 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2522 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2523 plane_state->color_plane[0].offset, 0);
2529 __intel_display_resume(struct drm_device *dev,
2530 struct drm_atomic_state *state,
2531 struct drm_modeset_acquire_ctx *ctx)
2533 struct drm_crtc_state *crtc_state;
2534 struct drm_crtc *crtc;
2537 intel_modeset_setup_hw_state(dev, ctx);
2538 intel_vga_redisable(to_i915(dev));
2544 * We've duplicated the state, pointers to the old state are invalid.
2546 * Don't attempt to use the old state until we commit the duplicated state.
2548 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2550 * Force recalculation even if we restore
2551 * current state. With fast modeset this may not result
2552 * in a modeset when the state is compatible.
2554 crtc_state->mode_changed = true;
2557 /* ignore any reset values/BIOS leftovers in the WM registers */
2558 if (!HAS_GMCH(to_i915(dev)))
2559 to_intel_atomic_state(state)->skip_intermediate_wm = true;
2561 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
2563 drm_WARN_ON(dev, ret == -EDEADLK);
2567 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2569 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2570 intel_has_gpu_reset(&dev_priv->gt));
2573 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
2575 struct drm_device *dev = &dev_priv->drm;
2576 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2577 struct drm_atomic_state *state;
2580 if (!HAS_DISPLAY(dev_priv))
2583 /* reset doesn't touch the display */
2584 if (!dev_priv->params.force_reset_modeset_test &&
2585 !gpu_reset_clobbers_display(dev_priv))
2588 /* We have a modeset vs reset deadlock, defensively unbreak it. */
2589 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2590 smp_mb__after_atomic();
2591 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
2593 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
2594 drm_dbg_kms(&dev_priv->drm,
2595 "Modeset potentially stuck, unbreaking through wedging\n");
2596 intel_gt_set_wedged(&dev_priv->gt);
2600 * Need mode_config.mutex so that we don't
2601 * trample ongoing ->detect() and whatnot.
2603 mutex_lock(&dev->mode_config.mutex);
2604 drm_modeset_acquire_init(ctx, 0);
2606 ret = drm_modeset_lock_all_ctx(dev, ctx);
2607 if (ret != -EDEADLK)
2610 drm_modeset_backoff(ctx);
2613 * Disabling the crtcs gracefully seems nicer. Also the
2614 * g33 docs say we should at least disable all the planes.
2616 state = drm_atomic_helper_duplicate_state(dev, ctx);
2617 if (IS_ERR(state)) {
2618 ret = PTR_ERR(state);
2619 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
2624 ret = drm_atomic_helper_disable_all(dev, ctx);
2626 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2628 drm_atomic_state_put(state);
2632 dev_priv->modeset_restore_state = state;
2633 state->acquire_ctx = ctx;
2636 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
2638 struct drm_device *dev = &dev_priv->drm;
2639 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2640 struct drm_atomic_state *state;
2643 if (!HAS_DISPLAY(dev_priv))
2646 /* reset doesn't touch the display */
2647 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
2650 state = fetch_and_zero(&dev_priv->modeset_restore_state);
2654 /* reset doesn't touch the display */
2655 if (!gpu_reset_clobbers_display(dev_priv)) {
2656 /* for testing only restore the display */
2657 ret = __intel_display_resume(dev, state, ctx);
2659 drm_err(&dev_priv->drm,
2660 "Restoring old state failed with %i\n", ret);
2663 * The display has been reset as well,
2664 * so need a full re-initialization.
2666 intel_pps_unlock_regs_wa(dev_priv);
2667 intel_modeset_init_hw(dev_priv);
2668 intel_init_clock_gating(dev_priv);
2669 intel_hpd_init(dev_priv);
2671 ret = __intel_display_resume(dev, state, ctx);
2673 drm_err(&dev_priv->drm,
2674 "Restoring old state failed with %i\n", ret);
2676 intel_hpd_poll_disable(dev_priv);
2679 drm_atomic_state_put(state);
2681 drm_modeset_drop_locks(ctx);
2682 drm_modeset_acquire_fini(ctx);
2683 mutex_unlock(&dev->mode_config.mutex);
2685 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2688 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
2690 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2691 enum pipe pipe = crtc->pipe;
2694 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2697 * Display WA #1153: icl
2698 * enable hardware to bypass the alpha math
2699 * and rounding for per-pixel values 00 and 0xff
2701 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2703 * Display WA # 1605353570: icl
2704 * Set the pixel rounding bit to 1 for allowing
2705 * passthrough of Frame buffer pixels unmodified
2708 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2709 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2712 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2714 struct drm_crtc *crtc;
2717 drm_for_each_crtc(crtc, &dev_priv->drm) {
2718 struct drm_crtc_commit *commit;
2719 spin_lock(&crtc->commit_lock);
2720 commit = list_first_entry_or_null(&crtc->commit_list,
2721 struct drm_crtc_commit, commit_entry);
2722 cleanup_done = commit ?
2723 try_wait_for_completion(&commit->cleanup_done) : true;
2724 spin_unlock(&crtc->commit_lock);
2729 drm_crtc_wait_one_vblank(crtc);
2737 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
2741 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
2743 mutex_lock(&dev_priv->sb_lock);
2745 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2746 temp |= SBI_SSCCTL_DISABLE;
2747 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2749 mutex_unlock(&dev_priv->sb_lock);
2752 /* Program iCLKIP clock to the desired frequency */
2753 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
2755 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2756 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2757 int clock = crtc_state->hw.adjusted_mode.crtc_clock;
2758 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2761 lpt_disable_iclkip(dev_priv);
2763 /* The iCLK virtual clock root frequency is in MHz,
2764 * but the adjusted_mode->crtc_clock in in KHz. To get the
2765 * divisors, it is necessary to divide one by another, so we
2766 * convert the virtual clock precision to KHz here for higher
2769 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
2770 u32 iclk_virtual_root_freq = 172800 * 1000;
2771 u32 iclk_pi_range = 64;
2772 u32 desired_divisor;
2774 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2776 divsel = (desired_divisor / iclk_pi_range) - 2;
2777 phaseinc = desired_divisor % iclk_pi_range;
2780 * Near 20MHz is a corner case which is
2781 * out of range for the 7-bit divisor
2787 /* This should not happen with any sane values */
2788 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2789 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2790 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
2791 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2793 drm_dbg_kms(&dev_priv->drm,
2794 "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2795 clock, auxdiv, divsel, phasedir, phaseinc);
2797 mutex_lock(&dev_priv->sb_lock);
2799 /* Program SSCDIVINTPHASE6 */
2800 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2801 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2802 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2803 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2804 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2805 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2806 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2807 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2809 /* Program SSCAUXDIV */
2810 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2811 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2812 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2813 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2815 /* Enable modulator and associated divider */
2816 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2817 temp &= ~SBI_SSCCTL_DISABLE;
2818 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2820 mutex_unlock(&dev_priv->sb_lock);
2822 /* Wait for initialization time */
2825 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2828 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2830 u32 divsel, phaseinc, auxdiv;
2831 u32 iclk_virtual_root_freq = 172800 * 1000;
2832 u32 iclk_pi_range = 64;
2833 u32 desired_divisor;
2836 if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2839 mutex_lock(&dev_priv->sb_lock);
2841 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2842 if (temp & SBI_SSCCTL_DISABLE) {
2843 mutex_unlock(&dev_priv->sb_lock);
2847 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2848 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2849 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2850 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2851 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2853 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2854 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2855 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2857 mutex_unlock(&dev_priv->sb_lock);
2859 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2861 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2862 desired_divisor << auxdiv);
2865 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2866 enum pipe pch_transcoder)
2868 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2869 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2870 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2872 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2873 intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2874 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2875 intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2876 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2877 intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2879 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2880 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2881 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2882 intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2883 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2884 intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2885 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2886 intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2889 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2893 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2894 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2897 drm_WARN_ON(&dev_priv->drm,
2898 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2900 drm_WARN_ON(&dev_priv->drm,
2901 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2904 temp &= ~FDI_BC_BIFURCATION_SELECT;
2906 temp |= FDI_BC_BIFURCATION_SELECT;
2908 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2909 enable ? "en" : "dis");
2910 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2911 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2914 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2916 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2917 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2919 switch (crtc->pipe) {
2923 if (crtc_state->fdi_lanes > 2)
2924 cpt_set_fdi_bc_bifurcation(dev_priv, false);
2926 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2930 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2939 * Finds the encoder associated with the given CRTC. This can only be
2940 * used when we know that the CRTC isn't feeding multiple encoders!
2942 struct intel_encoder *
2943 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2944 const struct intel_crtc_state *crtc_state)
2946 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2947 const struct drm_connector_state *connector_state;
2948 const struct drm_connector *connector;
2949 struct intel_encoder *encoder = NULL;
2950 int num_encoders = 0;
2953 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2954 if (connector_state->crtc != &crtc->base)
2957 encoder = to_intel_encoder(connector_state->best_encoder);
2961 drm_WARN(encoder->base.dev, num_encoders != 1,
2962 "%d encoders for pipe %c\n",
2963 num_encoders, pipe_name(crtc->pipe));
2969 * Enable PCH resources required for PCH ports:
2971 * - FDI training & RX/TX
2972 * - update transcoder timings
2973 * - DP transcoding bits
2976 static void ilk_pch_enable(const struct intel_atomic_state *state,
2977 const struct intel_crtc_state *crtc_state)
2979 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2980 struct drm_device *dev = crtc->base.dev;
2981 struct drm_i915_private *dev_priv = to_i915(dev);
2982 enum pipe pipe = crtc->pipe;
2985 assert_pch_transcoder_disabled(dev_priv, pipe);
2987 if (IS_IVYBRIDGE(dev_priv))
2988 ivb_update_fdi_bc_bifurcation(crtc_state);
2990 /* Write the TU size bits before fdi link training, so that error
2991 * detection works. */
2992 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2993 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2995 /* For PCH output, training FDI link */
2996 dev_priv->display.fdi_link_train(crtc, crtc_state);
2998 /* We need to program the right clock selection before writing the pixel
2999 * mutliplier into the DPLL. */
3000 if (HAS_PCH_CPT(dev_priv)) {
3003 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3004 temp |= TRANS_DPLL_ENABLE(pipe);
3005 sel = TRANS_DPLLB_SEL(pipe);
3006 if (crtc_state->shared_dpll ==
3007 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
3011 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3014 /* XXX: pch pll's can be enabled any time before we enable the PCH
3015 * transcoder, and we actually should do this to not upset any PCH
3016 * transcoder that already use the clock when we share it.
3018 * Note that enable_shared_dpll tries to do the right thing, but
3019 * get_shared_dpll unconditionally resets the pll - we need that to have
3020 * the right LVDS enable sequence. */
3021 intel_enable_shared_dpll(crtc_state);
3023 /* set transcoder timing, panel must allow it */
3024 assert_panel_unlocked(dev_priv, pipe);
3025 ilk_pch_transcoder_set_timings(crtc_state, pipe);
3027 intel_fdi_normal_train(crtc);
3029 /* For PCH DP, enable TRANS_DP_CTL */
3030 if (HAS_PCH_CPT(dev_priv) &&
3031 intel_crtc_has_dp_encoder(crtc_state)) {
3032 const struct drm_display_mode *adjusted_mode =
3033 &crtc_state->hw.adjusted_mode;
3034 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3035 i915_reg_t reg = TRANS_DP_CTL(pipe);
3038 temp = intel_de_read(dev_priv, reg);
3039 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3040 TRANS_DP_SYNC_MASK |
3042 temp |= TRANS_DP_OUTPUT_ENABLE;
3043 temp |= bpc << 9; /* same format but at 11:9 */
3045 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
3046 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3047 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
3048 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3050 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
3051 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
3052 temp |= TRANS_DP_PORT_SEL(port);
3054 intel_de_write(dev_priv, reg, temp);
3057 ilk_enable_pch_transcoder(crtc_state);
3060 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
3062 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3063 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3064 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3066 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
3068 lpt_program_iclkip(crtc_state);
3070 /* Set transcoder timing. */
3071 ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
3073 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3076 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
3079 i915_reg_t dslreg = PIPEDSL(pipe);
3082 temp = intel_de_read(dev_priv, dslreg);
3084 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
3085 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
3086 drm_err(&dev_priv->drm,
3087 "mode set failed: pipe %c stuck\n",
3092 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
3094 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3095 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3096 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
3097 enum pipe pipe = crtc->pipe;
3098 int width = drm_rect_width(dst);
3099 int height = drm_rect_height(dst);
3103 if (!crtc_state->pch_pfit.enabled)
3106 /* Force use of hard-coded filter coefficients
3107 * as some pre-programmed values are broken,
3110 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3111 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3112 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
3114 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3116 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
3117 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
3120 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
3122 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3123 struct drm_device *dev = crtc->base.dev;
3124 struct drm_i915_private *dev_priv = to_i915(dev);
3126 if (!crtc_state->ips_enabled)
3130 * We can only enable IPS after we enable a plane and wait for a vblank
3131 * This function is called from post_plane_update, which is run after
3134 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
3136 if (IS_BROADWELL(dev_priv)) {
3137 drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
3138 IPS_ENABLE | IPS_PCODE_CONTROL));
3139 /* Quoting Art Runyan: "its not safe to expect any particular
3140 * value in IPS_CTL bit 31 after enabling IPS through the
3141 * mailbox." Moreover, the mailbox may return a bogus state,
3142 * so we need to just enable it and continue on.
3145 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
3146 /* The bit only becomes 1 in the next vblank, so this wait here
3147 * is essentially intel_wait_for_vblank. If we don't have this
3148 * and don't wait for vblanks until the end of crtc_enable, then
3149 * the HW state readout code will complain that the expected
3150 * IPS_CTL value is not the one we read. */
3151 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
3152 drm_err(&dev_priv->drm,
3153 "Timed out waiting for IPS enable\n");
3157 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
3159 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3160 struct drm_device *dev = crtc->base.dev;
3161 struct drm_i915_private *dev_priv = to_i915(dev);
3163 if (!crtc_state->ips_enabled)
3166 if (IS_BROADWELL(dev_priv)) {
3168 sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3170 * Wait for PCODE to finish disabling IPS. The BSpec specified
3171 * 42ms timeout value leads to occasional timeouts so use 100ms
3174 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
3175 drm_err(&dev_priv->drm,
3176 "Timed out waiting for IPS disable\n");
3178 intel_de_write(dev_priv, IPS_CTL, 0);
3179 intel_de_posting_read(dev_priv, IPS_CTL);
3182 /* We need to wait for a vblank before we can disable the plane. */
3183 intel_wait_for_vblank(dev_priv, crtc->pipe);
3186 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
3188 if (intel_crtc->overlay)
3189 (void) intel_overlay_switch_off(intel_crtc->overlay);
3191 /* Let userspace switch the overlay on again. In most cases userspace
3192 * has to recompute where to put it anyway.
3196 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
3197 const struct intel_crtc_state *new_crtc_state)
3199 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3200 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3202 if (!old_crtc_state->ips_enabled)
3205 if (intel_crtc_needs_modeset(new_crtc_state))
3209 * Workaround : Do not read or write the pipe palette/gamma data while
3210 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3212 * Disable IPS before we program the LUT.
3214 if (IS_HASWELL(dev_priv) &&
3215 (new_crtc_state->uapi.color_mgmt_changed ||
3216 new_crtc_state->update_pipe) &&
3217 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3220 return !new_crtc_state->ips_enabled;
3223 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
3224 const struct intel_crtc_state *new_crtc_state)
3226 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3227 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3229 if (!new_crtc_state->ips_enabled)
3232 if (intel_crtc_needs_modeset(new_crtc_state))
3236 * Workaround : Do not read or write the pipe palette/gamma data while
3237 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3239 * Re-enable IPS after the LUT has been programmed.
3241 if (IS_HASWELL(dev_priv) &&
3242 (new_crtc_state->uapi.color_mgmt_changed ||
3243 new_crtc_state->update_pipe) &&
3244 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3248 * We can't read out IPS on broadwell, assume the worst and
3249 * forcibly enable IPS on the first fastset.
3251 if (new_crtc_state->update_pipe && old_crtc_state->inherited)
3254 return !old_crtc_state->ips_enabled;
3257 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
3259 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3261 if (!crtc_state->nv12_planes)
3264 /* WA Display #0827: Gen9:all */
3265 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
3271 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
3273 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3275 /* Wa_2006604312:icl,ehl */
3276 if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
3282 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
3283 const struct intel_crtc_state *new_crtc_state)
3285 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
3286 new_crtc_state->active_planes;
3289 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
3290 const struct intel_crtc_state *new_crtc_state)
3292 return old_crtc_state->active_planes &&
3293 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
3296 static void intel_post_plane_update(struct intel_atomic_state *state,
3297 struct intel_crtc *crtc)
3299 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3300 const struct intel_crtc_state *old_crtc_state =
3301 intel_atomic_get_old_crtc_state(state, crtc);
3302 const struct intel_crtc_state *new_crtc_state =
3303 intel_atomic_get_new_crtc_state(state, crtc);
3304 enum pipe pipe = crtc->pipe;
3306 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
3308 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
3309 intel_update_watermarks(crtc);
3311 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
3312 hsw_enable_ips(new_crtc_state);
3314 intel_fbc_post_update(state, crtc);
3316 if (needs_nv12_wa(old_crtc_state) &&
3317 !needs_nv12_wa(new_crtc_state))
3318 skl_wa_827(dev_priv, pipe, false);
3320 if (needs_scalerclk_wa(old_crtc_state) &&
3321 !needs_scalerclk_wa(new_crtc_state))
3322 icl_wa_scalerclkgating(dev_priv, pipe, false);
3325 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
3326 struct intel_crtc *crtc)
3328 const struct intel_crtc_state *crtc_state =
3329 intel_atomic_get_new_crtc_state(state, crtc);
3330 u8 update_planes = crtc_state->update_planes;
3331 const struct intel_plane_state *plane_state;
3332 struct intel_plane *plane;
3335 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
3336 if (plane->enable_flip_done &&
3337 plane->pipe == crtc->pipe &&
3338 update_planes & BIT(plane->id))
3339 plane->enable_flip_done(plane);
3343 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
3344 struct intel_crtc *crtc)
3346 const struct intel_crtc_state *crtc_state =
3347 intel_atomic_get_new_crtc_state(state, crtc);
3348 u8 update_planes = crtc_state->update_planes;
3349 const struct intel_plane_state *plane_state;
3350 struct intel_plane *plane;
3353 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
3354 if (plane->disable_flip_done &&
3355 plane->pipe == crtc->pipe &&
3356 update_planes & BIT(plane->id))
3357 plane->disable_flip_done(plane);
3361 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
3362 struct intel_crtc *crtc)
3364 struct drm_i915_private *i915 = to_i915(state->base.dev);
3365 const struct intel_crtc_state *old_crtc_state =
3366 intel_atomic_get_old_crtc_state(state, crtc);
3367 const struct intel_crtc_state *new_crtc_state =
3368 intel_atomic_get_new_crtc_state(state, crtc);
3369 u8 update_planes = new_crtc_state->update_planes;
3370 const struct intel_plane_state *old_plane_state;
3371 struct intel_plane *plane;
3372 bool need_vbl_wait = false;
3375 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3376 if (plane->need_async_flip_disable_wa &&
3377 plane->pipe == crtc->pipe &&
3378 update_planes & BIT(plane->id)) {
3380 * Apart from the async flip bit we want to
3381 * preserve the old state for the plane.
3383 plane->async_flip(plane, old_crtc_state,
3384 old_plane_state, false);
3385 need_vbl_wait = true;
3390 intel_wait_for_vblank(i915, crtc->pipe);
3393 static void intel_pre_plane_update(struct intel_atomic_state *state,
3394 struct intel_crtc *crtc)
3396 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3397 const struct intel_crtc_state *old_crtc_state =
3398 intel_atomic_get_old_crtc_state(state, crtc);
3399 const struct intel_crtc_state *new_crtc_state =
3400 intel_atomic_get_new_crtc_state(state, crtc);
3401 enum pipe pipe = crtc->pipe;
3403 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
3404 hsw_disable_ips(old_crtc_state);
3406 if (intel_fbc_pre_update(state, crtc))
3407 intel_wait_for_vblank(dev_priv, pipe);
3409 /* Display WA 827 */
3410 if (!needs_nv12_wa(old_crtc_state) &&
3411 needs_nv12_wa(new_crtc_state))
3412 skl_wa_827(dev_priv, pipe, true);
3414 /* Wa_2006604312:icl,ehl */
3415 if (!needs_scalerclk_wa(old_crtc_state) &&
3416 needs_scalerclk_wa(new_crtc_state))
3417 icl_wa_scalerclkgating(dev_priv, pipe, true);
3420 * Vblank time updates from the shadow to live plane control register
3421 * are blocked if the memory self-refresh mode is active at that
3422 * moment. So to make sure the plane gets truly disabled, disable
3423 * first the self-refresh mode. The self-refresh enable bit in turn
3424 * will be checked/applied by the HW only at the next frame start
3425 * event which is after the vblank start event, so we need to have a
3426 * wait-for-vblank between disabling the plane and the pipe.
3428 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
3429 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
3430 intel_wait_for_vblank(dev_priv, pipe);
3433 * IVB workaround: must disable low power watermarks for at least
3434 * one frame before enabling scaling. LP watermarks can be re-enabled
3435 * when scaling is disabled.
3437 * WaCxSRDisabledForSpriteScaling:ivb
3439 if (old_crtc_state->hw.active &&
3440 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
3441 intel_wait_for_vblank(dev_priv, pipe);
3444 * If we're doing a modeset we don't need to do any
3445 * pre-vblank watermark programming here.
3447 if (!intel_crtc_needs_modeset(new_crtc_state)) {
3449 * For platforms that support atomic watermarks, program the
3450 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
3451 * will be the intermediate values that are safe for both pre- and
3452 * post- vblank; when vblank happens, the 'active' values will be set
3453 * to the final 'target' values and we'll do this again to get the
3454 * optimal watermarks. For gen9+ platforms, the values we program here
3455 * will be the final target values which will get automatically latched
3456 * at vblank time; no further programming will be necessary.
3458 * If a platform hasn't been transitioned to atomic watermarks yet,
3459 * we'll continue to update watermarks the old way, if flags tell
3462 if (dev_priv->display.initial_watermarks)
3463 dev_priv->display.initial_watermarks(state, crtc);
3464 else if (new_crtc_state->update_wm_pre)
3465 intel_update_watermarks(crtc);
3469 * Gen2 reports pipe underruns whenever all planes are disabled.
3470 * So disable underrun reporting before all the planes get disabled.
3472 * We do this after .initial_watermarks() so that we have a
3473 * chance of catching underruns with the intermediate watermarks
3474 * vs. the old plane configuration.
3476 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
3477 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3480 * WA for platforms where async address update enable bit
3481 * is double buffered and only latched at start of vblank.
3483 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
3484 intel_crtc_async_flip_disable_wa(state, crtc);
3487 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
3488 struct intel_crtc *crtc)
3490 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3491 const struct intel_crtc_state *new_crtc_state =
3492 intel_atomic_get_new_crtc_state(state, crtc);
3493 unsigned int update_mask = new_crtc_state->update_planes;
3494 const struct intel_plane_state *old_plane_state;
3495 struct intel_plane *plane;
3496 unsigned fb_bits = 0;
3499 intel_crtc_dpms_overlay_disable(crtc);
3501 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3502 if (crtc->pipe != plane->pipe ||
3503 !(update_mask & BIT(plane->id)))
3506 intel_disable_plane(plane, new_crtc_state);
3508 if (old_plane_state->uapi.visible)
3509 fb_bits |= plane->frontbuffer_bit;
3512 intel_frontbuffer_flip(dev_priv, fb_bits);
3516 * intel_connector_primary_encoder - get the primary encoder for a connector
3517 * @connector: connector for which to return the encoder
3519 * Returns the primary encoder for a connector. There is a 1:1 mapping from
3520 * all connectors to their encoder, except for DP-MST connectors which have
3521 * both a virtual and a primary encoder. These DP-MST primary encoders can be
3522 * pointed to by as many DP-MST connectors as there are pipes.
3524 static struct intel_encoder *
3525 intel_connector_primary_encoder(struct intel_connector *connector)
3527 struct intel_encoder *encoder;
3529 if (connector->mst_port)
3530 return &dp_to_dig_port(connector->mst_port)->base;
3532 encoder = intel_attached_encoder(connector);
3533 drm_WARN_ON(connector->base.dev, !encoder);
3538 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
3540 struct drm_connector_state *new_conn_state;
3541 struct drm_connector *connector;
3544 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3546 struct intel_connector *intel_connector;
3547 struct intel_encoder *encoder;
3548 struct intel_crtc *crtc;
3550 if (!intel_connector_needs_modeset(state, connector))
3553 intel_connector = to_intel_connector(connector);
3554 encoder = intel_connector_primary_encoder(intel_connector);
3555 if (!encoder->update_prepare)
3558 crtc = new_conn_state->crtc ?
3559 to_intel_crtc(new_conn_state->crtc) : NULL;
3560 encoder->update_prepare(state, encoder, crtc);
3564 static void intel_encoders_update_complete(struct intel_atomic_state *state)
3566 struct drm_connector_state *new_conn_state;
3567 struct drm_connector *connector;
3570 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3572 struct intel_connector *intel_connector;
3573 struct intel_encoder *encoder;
3574 struct intel_crtc *crtc;
3576 if (!intel_connector_needs_modeset(state, connector))
3579 intel_connector = to_intel_connector(connector);
3580 encoder = intel_connector_primary_encoder(intel_connector);
3581 if (!encoder->update_complete)
3584 crtc = new_conn_state->crtc ?
3585 to_intel_crtc(new_conn_state->crtc) : NULL;
3586 encoder->update_complete(state, encoder, crtc);
3590 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
3591 struct intel_crtc *crtc)
3593 const struct intel_crtc_state *crtc_state =
3594 intel_atomic_get_new_crtc_state(state, crtc);
3595 const struct drm_connector_state *conn_state;
3596 struct drm_connector *conn;
3599 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3600 struct intel_encoder *encoder =
3601 to_intel_encoder(conn_state->best_encoder);
3603 if (conn_state->crtc != &crtc->base)
3606 if (encoder->pre_pll_enable)
3607 encoder->pre_pll_enable(state, encoder,
3608 crtc_state, conn_state);
3612 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
3613 struct intel_crtc *crtc)
3615 const struct intel_crtc_state *crtc_state =
3616 intel_atomic_get_new_crtc_state(state, crtc);
3617 const struct drm_connector_state *conn_state;
3618 struct drm_connector *conn;
3621 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3622 struct intel_encoder *encoder =
3623 to_intel_encoder(conn_state->best_encoder);
3625 if (conn_state->crtc != &crtc->base)
3628 if (encoder->pre_enable)
3629 encoder->pre_enable(state, encoder,
3630 crtc_state, conn_state);
3634 static void intel_encoders_enable(struct intel_atomic_state *state,
3635 struct intel_crtc *crtc)
3637 const struct intel_crtc_state *crtc_state =
3638 intel_atomic_get_new_crtc_state(state, crtc);
3639 const struct drm_connector_state *conn_state;
3640 struct drm_connector *conn;
3643 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3644 struct intel_encoder *encoder =
3645 to_intel_encoder(conn_state->best_encoder);
3647 if (conn_state->crtc != &crtc->base)
3650 if (encoder->enable)
3651 encoder->enable(state, encoder,
3652 crtc_state, conn_state);
3653 intel_opregion_notify_encoder(encoder, true);
3657 static void intel_encoders_disable(struct intel_atomic_state *state,
3658 struct intel_crtc *crtc)
3660 const struct intel_crtc_state *old_crtc_state =
3661 intel_atomic_get_old_crtc_state(state, crtc);
3662 const struct drm_connector_state *old_conn_state;
3663 struct drm_connector *conn;
3666 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3667 struct intel_encoder *encoder =
3668 to_intel_encoder(old_conn_state->best_encoder);
3670 if (old_conn_state->crtc != &crtc->base)
3673 intel_opregion_notify_encoder(encoder, false);
3674 if (encoder->disable)
3675 encoder->disable(state, encoder,
3676 old_crtc_state, old_conn_state);
3680 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3681 struct intel_crtc *crtc)
3683 const struct intel_crtc_state *old_crtc_state =
3684 intel_atomic_get_old_crtc_state(state, crtc);
3685 const struct drm_connector_state *old_conn_state;
3686 struct drm_connector *conn;
3689 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3690 struct intel_encoder *encoder =
3691 to_intel_encoder(old_conn_state->best_encoder);
3693 if (old_conn_state->crtc != &crtc->base)
3696 if (encoder->post_disable)
3697 encoder->post_disable(state, encoder,
3698 old_crtc_state, old_conn_state);
3702 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3703 struct intel_crtc *crtc)
3705 const struct intel_crtc_state *old_crtc_state =
3706 intel_atomic_get_old_crtc_state(state, crtc);
3707 const struct drm_connector_state *old_conn_state;
3708 struct drm_connector *conn;
3711 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3712 struct intel_encoder *encoder =
3713 to_intel_encoder(old_conn_state->best_encoder);
3715 if (old_conn_state->crtc != &crtc->base)
3718 if (encoder->post_pll_disable)
3719 encoder->post_pll_disable(state, encoder,
3720 old_crtc_state, old_conn_state);
3724 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3725 struct intel_crtc *crtc)
3727 const struct intel_crtc_state *crtc_state =
3728 intel_atomic_get_new_crtc_state(state, crtc);
3729 const struct drm_connector_state *conn_state;
3730 struct drm_connector *conn;
3733 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3734 struct intel_encoder *encoder =
3735 to_intel_encoder(conn_state->best_encoder);
3737 if (conn_state->crtc != &crtc->base)
3740 if (encoder->update_pipe)
3741 encoder->update_pipe(state, encoder,
3742 crtc_state, conn_state);
3746 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3748 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3749 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3751 plane->disable_plane(plane, crtc_state);
3754 static void ilk_crtc_enable(struct intel_atomic_state *state,
3755 struct intel_crtc *crtc)
3757 const struct intel_crtc_state *new_crtc_state =
3758 intel_atomic_get_new_crtc_state(state, crtc);
3759 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3760 enum pipe pipe = crtc->pipe;
3762 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3766 * Sometimes spurious CPU pipe underruns happen during FDI
3767 * training, at least with VGA+HDMI cloning. Suppress them.
3769 * On ILK we get an occasional spurious CPU pipe underruns
3770 * between eDP port A enable and vdd enable. Also PCH port
3771 * enable seems to result in the occasional CPU pipe underrun.
3773 * Spurious PCH underruns also occur during PCH enabling.
3775 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3776 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3778 if (new_crtc_state->has_pch_encoder)
3779 intel_prepare_shared_dpll(new_crtc_state);
3781 if (intel_crtc_has_dp_encoder(new_crtc_state))
3782 intel_dp_set_m_n(new_crtc_state, M1_N1);
3784 intel_set_transcoder_timings(new_crtc_state);
3785 intel_set_pipe_src_size(new_crtc_state);
3787 if (new_crtc_state->has_pch_encoder)
3788 intel_cpu_transcoder_set_m_n(new_crtc_state,
3789 &new_crtc_state->fdi_m_n, NULL);
3791 ilk_set_pipeconf(new_crtc_state);
3793 crtc->active = true;
3795 intel_encoders_pre_enable(state, crtc);
3797 if (new_crtc_state->has_pch_encoder) {
3798 /* Note: FDI PLL enabling _must_ be done before we enable the
3799 * cpu pipes, hence this is separate from all the other fdi/pch
3801 ilk_fdi_pll_enable(new_crtc_state);
3803 assert_fdi_tx_disabled(dev_priv, pipe);
3804 assert_fdi_rx_disabled(dev_priv, pipe);
3807 ilk_pfit_enable(new_crtc_state);
3810 * On ILK+ LUT must be loaded before the pipe is running but with
3813 intel_color_load_luts(new_crtc_state);
3814 intel_color_commit(new_crtc_state);
3815 /* update DSPCNTR to configure gamma for pipe bottom color */
3816 intel_disable_primary_plane(new_crtc_state);
3818 if (dev_priv->display.initial_watermarks)
3819 dev_priv->display.initial_watermarks(state, crtc);
3820 intel_enable_pipe(new_crtc_state);
3822 if (new_crtc_state->has_pch_encoder)
3823 ilk_pch_enable(state, new_crtc_state);
3825 intel_crtc_vblank_on(new_crtc_state);
3827 intel_encoders_enable(state, crtc);
3829 if (HAS_PCH_CPT(dev_priv))
3830 cpt_verify_modeset(dev_priv, pipe);
3833 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3834 * And a second vblank wait is needed at least on ILK with
3835 * some interlaced HDMI modes. Let's do the double wait always
3836 * in case there are more corner cases we don't know about.
3838 if (new_crtc_state->has_pch_encoder) {
3839 intel_wait_for_vblank(dev_priv, pipe);
3840 intel_wait_for_vblank(dev_priv, pipe);
3842 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3843 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3846 /* IPS only exists on ULT machines and is tied to pipe A. */
3847 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3849 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3852 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3853 enum pipe pipe, bool apply)
3855 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3856 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3863 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3866 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3868 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3869 enum pipe pipe = crtc->pipe;
3872 val = MBUS_DBOX_A_CREDIT(2);
3874 if (INTEL_GEN(dev_priv) >= 12) {
3875 val |= MBUS_DBOX_BW_CREDIT(2);
3876 val |= MBUS_DBOX_B_CREDIT(12);
3878 val |= MBUS_DBOX_BW_CREDIT(1);
3879 val |= MBUS_DBOX_B_CREDIT(8);
3882 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3885 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3887 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3888 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3890 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3891 HSW_LINETIME(crtc_state->linetime) |
3892 HSW_IPS_LINETIME(crtc_state->ips_linetime));
3895 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3897 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3898 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3899 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3902 val = intel_de_read(dev_priv, reg);
3903 val &= ~HSW_FRAME_START_DELAY_MASK;
3904 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3905 intel_de_write(dev_priv, reg, val);
3908 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3909 const struct intel_crtc_state *crtc_state)
3911 struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3912 struct intel_crtc_state *master_crtc_state;
3913 struct drm_connector_state *conn_state;
3914 struct drm_connector *conn;
3915 struct intel_encoder *encoder = NULL;
3918 if (crtc_state->bigjoiner_slave)
3919 master = crtc_state->bigjoiner_linked_crtc;
3921 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
3923 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3924 if (conn_state->crtc != &master->base)
3927 encoder = to_intel_encoder(conn_state->best_encoder);
3931 if (!crtc_state->bigjoiner_slave) {
3932 /* need to enable VDSC, which we skipped in pre-enable */
3933 intel_dsc_enable(encoder, crtc_state);
3936 * Enable sequence steps 1-7 on bigjoiner master
3938 intel_encoders_pre_pll_enable(state, master);
3939 intel_enable_shared_dpll(master_crtc_state);
3940 intel_encoders_pre_enable(state, master);
3942 /* and DSC on slave */
3943 intel_dsc_enable(NULL, crtc_state);
3947 static void hsw_crtc_enable(struct intel_atomic_state *state,
3948 struct intel_crtc *crtc)
3950 const struct intel_crtc_state *new_crtc_state =
3951 intel_atomic_get_new_crtc_state(state, crtc);
3952 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3953 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3954 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3955 bool psl_clkgate_wa;
3957 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3960 if (!new_crtc_state->bigjoiner) {
3961 intel_encoders_pre_pll_enable(state, crtc);
3963 if (new_crtc_state->shared_dpll)
3964 intel_enable_shared_dpll(new_crtc_state);
3966 intel_encoders_pre_enable(state, crtc);
3968 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3971 intel_set_pipe_src_size(new_crtc_state);
3972 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3973 bdw_set_pipemisc(new_crtc_state);
3975 if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3976 intel_set_transcoder_timings(new_crtc_state);
3978 if (cpu_transcoder != TRANSCODER_EDP)
3979 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3980 new_crtc_state->pixel_multiplier - 1);
3982 if (new_crtc_state->has_pch_encoder)
3983 intel_cpu_transcoder_set_m_n(new_crtc_state,
3984 &new_crtc_state->fdi_m_n, NULL);
3986 hsw_set_frame_start_delay(new_crtc_state);
3989 if (!transcoder_is_dsi(cpu_transcoder))
3990 hsw_set_pipeconf(new_crtc_state);
3992 crtc->active = true;
3994 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
3995 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
3996 new_crtc_state->pch_pfit.enabled;
3998 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
4000 if (INTEL_GEN(dev_priv) >= 9)
4001 skl_pfit_enable(new_crtc_state);
4003 ilk_pfit_enable(new_crtc_state);
4006 * On ILK+ LUT must be loaded before the pipe is running but with
4009 intel_color_load_luts(new_crtc_state);
4010 intel_color_commit(new_crtc_state);
4011 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
4012 if (INTEL_GEN(dev_priv) < 9)
4013 intel_disable_primary_plane(new_crtc_state);
4015 hsw_set_linetime_wm(new_crtc_state);
4017 if (INTEL_GEN(dev_priv) >= 11)
4018 icl_set_pipe_chicken(crtc);
4020 if (dev_priv->display.initial_watermarks)
4021 dev_priv->display.initial_watermarks(state, crtc);
4023 if (INTEL_GEN(dev_priv) >= 11)
4024 icl_pipe_mbus_enable(crtc);
4026 if (new_crtc_state->bigjoiner_slave) {
4027 trace_intel_pipe_enable(crtc);
4028 intel_crtc_vblank_on(new_crtc_state);
4031 intel_encoders_enable(state, crtc);
4033 if (psl_clkgate_wa) {
4034 intel_wait_for_vblank(dev_priv, pipe);
4035 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
4038 /* If we change the relative order between pipe/planes enabling, we need
4039 * to change the workaround. */
4040 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
4041 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
4042 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
4043 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
4047 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4049 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4050 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4051 enum pipe pipe = crtc->pipe;
4053 /* To avoid upsetting the power well on haswell only disable the pfit if
4054 * it's in use. The hw state code will make sure we get this right. */
4055 if (!old_crtc_state->pch_pfit.enabled)
4058 intel_de_write(dev_priv, PF_CTL(pipe), 0);
4059 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
4060 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
4063 static void ilk_crtc_disable(struct intel_atomic_state *state,
4064 struct intel_crtc *crtc)
4066 const struct intel_crtc_state *old_crtc_state =
4067 intel_atomic_get_old_crtc_state(state, crtc);
4068 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4069 enum pipe pipe = crtc->pipe;
4072 * Sometimes spurious CPU pipe underruns happen when the
4073 * pipe is already disabled, but FDI RX/TX is still enabled.
4074 * Happens at least with VGA+HDMI cloning. Suppress them.
4076 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4077 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4079 intel_encoders_disable(state, crtc);
4081 intel_crtc_vblank_off(old_crtc_state);
4083 intel_disable_pipe(old_crtc_state);
4085 ilk_pfit_disable(old_crtc_state);
4087 if (old_crtc_state->has_pch_encoder)
4088 ilk_fdi_disable(crtc);
4090 intel_encoders_post_disable(state, crtc);
4092 if (old_crtc_state->has_pch_encoder) {
4093 ilk_disable_pch_transcoder(dev_priv, pipe);
4095 if (HAS_PCH_CPT(dev_priv)) {
4099 /* disable TRANS_DP_CTL */
4100 reg = TRANS_DP_CTL(pipe);
4101 temp = intel_de_read(dev_priv, reg);
4102 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4103 TRANS_DP_PORT_SEL_MASK);
4104 temp |= TRANS_DP_PORT_SEL_NONE;
4105 intel_de_write(dev_priv, reg, temp);
4107 /* disable DPLL_SEL */
4108 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
4109 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4110 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
4113 ilk_fdi_pll_disable(crtc);
4116 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4117 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4120 static void hsw_crtc_disable(struct intel_atomic_state *state,
4121 struct intel_crtc *crtc)
4124 * FIXME collapse everything to one hook.
4125 * Need care with mst->ddi interactions.
4127 intel_encoders_disable(state, crtc);
4128 intel_encoders_post_disable(state, crtc);
4131 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
4133 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4134 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4136 if (!crtc_state->gmch_pfit.control)
4140 * The panel fitter should only be adjusted whilst the pipe is disabled,
4141 * according to register description and PRM.
4143 drm_WARN_ON(&dev_priv->drm,
4144 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
4145 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
4147 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
4148 crtc_state->gmch_pfit.pgm_ratios);
4149 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
4151 /* Border color in case we don't scale up to the full screen. Black by
4152 * default, change to something else for debugging. */
4153 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
4156 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
4158 if (phy == PHY_NONE)
4160 else if (IS_ALDERLAKE_S(dev_priv))
4161 return phy <= PHY_E;
4162 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
4163 return phy <= PHY_D;
4164 else if (IS_JSL_EHL(dev_priv))
4165 return phy <= PHY_C;
4166 else if (INTEL_GEN(dev_priv) >= 11)
4167 return phy <= PHY_B;
4172 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
4174 if (IS_TIGERLAKE(dev_priv))
4175 return phy >= PHY_D && phy <= PHY_I;
4176 else if (IS_ICELAKE(dev_priv))
4177 return phy >= PHY_C && phy <= PHY_F;
4182 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
4184 if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
4185 return PHY_B + port - PORT_TC1;
4186 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
4187 return PHY_C + port - PORT_TC1;
4188 else if (IS_JSL_EHL(i915) && port == PORT_D)
4191 return PHY_A + port - PORT_A;
4194 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
4196 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
4197 return TC_PORT_NONE;
4199 if (INTEL_GEN(dev_priv) >= 12)
4200 return TC_PORT_1 + port - PORT_TC1;
4202 return TC_PORT_1 + port - PORT_C;
4205 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
4209 return POWER_DOMAIN_PORT_DDI_A_LANES;
4211 return POWER_DOMAIN_PORT_DDI_B_LANES;
4213 return POWER_DOMAIN_PORT_DDI_C_LANES;
4215 return POWER_DOMAIN_PORT_DDI_D_LANES;
4217 return POWER_DOMAIN_PORT_DDI_E_LANES;
4219 return POWER_DOMAIN_PORT_DDI_F_LANES;
4221 return POWER_DOMAIN_PORT_DDI_G_LANES;
4223 return POWER_DOMAIN_PORT_DDI_H_LANES;
4225 return POWER_DOMAIN_PORT_DDI_I_LANES;
4228 return POWER_DOMAIN_PORT_OTHER;
4232 enum intel_display_power_domain
4233 intel_aux_power_domain(struct intel_digital_port *dig_port)
4235 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
4236 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
4238 if (intel_phy_is_tc(dev_priv, phy) &&
4239 dig_port->tc_mode == TC_PORT_TBT_ALT) {
4240 switch (dig_port->aux_ch) {
4242 return POWER_DOMAIN_AUX_C_TBT;
4244 return POWER_DOMAIN_AUX_D_TBT;
4246 return POWER_DOMAIN_AUX_E_TBT;
4248 return POWER_DOMAIN_AUX_F_TBT;
4250 return POWER_DOMAIN_AUX_G_TBT;
4252 return POWER_DOMAIN_AUX_H_TBT;
4254 return POWER_DOMAIN_AUX_I_TBT;
4256 MISSING_CASE(dig_port->aux_ch);
4257 return POWER_DOMAIN_AUX_C_TBT;
4261 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
4265 * Converts aux_ch to power_domain without caring about TBT ports for that use
4266 * intel_aux_power_domain()
4268 enum intel_display_power_domain
4269 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
4273 return POWER_DOMAIN_AUX_A;
4275 return POWER_DOMAIN_AUX_B;
4277 return POWER_DOMAIN_AUX_C;
4279 return POWER_DOMAIN_AUX_D;
4281 return POWER_DOMAIN_AUX_E;
4283 return POWER_DOMAIN_AUX_F;
4285 return POWER_DOMAIN_AUX_G;
4287 return POWER_DOMAIN_AUX_H;
4289 return POWER_DOMAIN_AUX_I;
4291 MISSING_CASE(aux_ch);
4292 return POWER_DOMAIN_AUX_A;
4296 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
4298 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4299 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4300 struct drm_encoder *encoder;
4301 enum pipe pipe = crtc->pipe;
4303 enum transcoder transcoder = crtc_state->cpu_transcoder;
4305 if (!crtc_state->hw.active)
4308 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
4309 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
4310 if (crtc_state->pch_pfit.enabled ||
4311 crtc_state->pch_pfit.force_thru)
4312 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4314 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
4315 crtc_state->uapi.encoder_mask) {
4316 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4318 mask |= BIT_ULL(intel_encoder->power_domain);
4321 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
4322 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
4324 if (crtc_state->shared_dpll)
4325 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
4327 if (crtc_state->dsc.compression_enable)
4328 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
4334 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
4336 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4337 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4338 enum intel_display_power_domain domain;
4339 u64 domains, new_domains, old_domains;
4341 domains = get_crtc_power_domains(crtc_state);
4343 new_domains = domains & ~crtc->enabled_power_domains.mask;
4344 old_domains = crtc->enabled_power_domains.mask & ~domains;
4346 for_each_power_domain(domain, new_domains)
4347 intel_display_power_get_in_set(dev_priv,
4348 &crtc->enabled_power_domains,
4354 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
4357 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
4358 &crtc->enabled_power_domains,
4362 static void valleyview_crtc_enable(struct intel_atomic_state *state,
4363 struct intel_crtc *crtc)
4365 const struct intel_crtc_state *new_crtc_state =
4366 intel_atomic_get_new_crtc_state(state, crtc);
4367 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4368 enum pipe pipe = crtc->pipe;
4370 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
4373 if (intel_crtc_has_dp_encoder(new_crtc_state))
4374 intel_dp_set_m_n(new_crtc_state, M1_N1);
4376 intel_set_transcoder_timings(new_crtc_state);
4377 intel_set_pipe_src_size(new_crtc_state);
4379 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
4380 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
4381 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
4384 i9xx_set_pipeconf(new_crtc_state);
4386 crtc->active = true;
4388 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4390 intel_encoders_pre_pll_enable(state, crtc);
4392 if (IS_CHERRYVIEW(dev_priv)) {
4393 chv_prepare_pll(crtc, new_crtc_state);
4394 chv_enable_pll(crtc, new_crtc_state);
4396 vlv_prepare_pll(crtc, new_crtc_state);
4397 vlv_enable_pll(crtc, new_crtc_state);
4400 intel_encoders_pre_enable(state, crtc);
4402 i9xx_pfit_enable(new_crtc_state);
4404 intel_color_load_luts(new_crtc_state);
4405 intel_color_commit(new_crtc_state);
4406 /* update DSPCNTR to configure gamma for pipe bottom color */
4407 intel_disable_primary_plane(new_crtc_state);
4409 dev_priv->display.initial_watermarks(state, crtc);
4410 intel_enable_pipe(new_crtc_state);
4412 intel_crtc_vblank_on(new_crtc_state);
4414 intel_encoders_enable(state, crtc);
4417 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
4419 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4420 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4422 intel_de_write(dev_priv, FP0(crtc->pipe),
4423 crtc_state->dpll_hw_state.fp0);
4424 intel_de_write(dev_priv, FP1(crtc->pipe),
4425 crtc_state->dpll_hw_state.fp1);
4428 static void i9xx_crtc_enable(struct intel_atomic_state *state,
4429 struct intel_crtc *crtc)
4431 const struct intel_crtc_state *new_crtc_state =
4432 intel_atomic_get_new_crtc_state(state, crtc);
4433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4434 enum pipe pipe = crtc->pipe;
4436 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
4439 i9xx_set_pll_dividers(new_crtc_state);
4441 if (intel_crtc_has_dp_encoder(new_crtc_state))
4442 intel_dp_set_m_n(new_crtc_state, M1_N1);
4444 intel_set_transcoder_timings(new_crtc_state);
4445 intel_set_pipe_src_size(new_crtc_state);
4447 i9xx_set_pipeconf(new_crtc_state);
4449 crtc->active = true;
4451 if (!IS_GEN(dev_priv, 2))
4452 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4454 intel_encoders_pre_enable(state, crtc);
4456 i9xx_enable_pll(crtc, new_crtc_state);
4458 i9xx_pfit_enable(new_crtc_state);
4460 intel_color_load_luts(new_crtc_state);
4461 intel_color_commit(new_crtc_state);
4462 /* update DSPCNTR to configure gamma for pipe bottom color */
4463 intel_disable_primary_plane(new_crtc_state);
4465 if (dev_priv->display.initial_watermarks)
4466 dev_priv->display.initial_watermarks(state, crtc);
4468 intel_update_watermarks(crtc);
4469 intel_enable_pipe(new_crtc_state);
4471 intel_crtc_vblank_on(new_crtc_state);
4473 intel_encoders_enable(state, crtc);
4475 /* prevents spurious underruns */
4476 if (IS_GEN(dev_priv, 2))
4477 intel_wait_for_vblank(dev_priv, pipe);
4480 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4482 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4483 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4485 if (!old_crtc_state->gmch_pfit.control)
4488 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
4490 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
4491 intel_de_read(dev_priv, PFIT_CONTROL));
4492 intel_de_write(dev_priv, PFIT_CONTROL, 0);
4495 static void i9xx_crtc_disable(struct intel_atomic_state *state,
4496 struct intel_crtc *crtc)
4498 struct intel_crtc_state *old_crtc_state =
4499 intel_atomic_get_old_crtc_state(state, crtc);
4500 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4501 enum pipe pipe = crtc->pipe;
4504 * On gen2 planes are double buffered but the pipe isn't, so we must
4505 * wait for planes to fully turn off before disabling the pipe.
4507 if (IS_GEN(dev_priv, 2))
4508 intel_wait_for_vblank(dev_priv, pipe);
4510 intel_encoders_disable(state, crtc);
4512 intel_crtc_vblank_off(old_crtc_state);
4514 intel_disable_pipe(old_crtc_state);
4516 i9xx_pfit_disable(old_crtc_state);
4518 intel_encoders_post_disable(state, crtc);
4520 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
4521 if (IS_CHERRYVIEW(dev_priv))
4522 chv_disable_pll(dev_priv, pipe);
4523 else if (IS_VALLEYVIEW(dev_priv))
4524 vlv_disable_pll(dev_priv, pipe);
4526 i9xx_disable_pll(old_crtc_state);
4529 intel_encoders_post_pll_disable(state, crtc);
4531 if (!IS_GEN(dev_priv, 2))
4532 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4534 if (!dev_priv->display.initial_watermarks)
4535 intel_update_watermarks(crtc);
4537 /* clock the pipe down to 640x480@60 to potentially save power */
4538 if (IS_I830(dev_priv))
4539 i830_enable_pipe(dev_priv, pipe);
4542 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
4543 struct drm_modeset_acquire_ctx *ctx)
4545 struct intel_encoder *encoder;
4546 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4547 struct intel_bw_state *bw_state =
4548 to_intel_bw_state(dev_priv->bw_obj.state);
4549 struct intel_cdclk_state *cdclk_state =
4550 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
4551 struct intel_dbuf_state *dbuf_state =
4552 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
4553 struct intel_crtc_state *crtc_state =
4554 to_intel_crtc_state(crtc->base.state);
4555 struct intel_plane *plane;
4556 struct drm_atomic_state *state;
4557 struct intel_crtc_state *temp_crtc_state;
4558 enum pipe pipe = crtc->pipe;
4561 if (!crtc_state->hw.active)
4564 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
4565 const struct intel_plane_state *plane_state =
4566 to_intel_plane_state(plane->base.state);
4568 if (plane_state->uapi.visible)
4569 intel_plane_disable_noatomic(crtc, plane);
4572 state = drm_atomic_state_alloc(&dev_priv->drm);
4574 drm_dbg_kms(&dev_priv->drm,
4575 "failed to disable [CRTC:%d:%s], out of memory",
4576 crtc->base.base.id, crtc->base.name);
4580 state->acquire_ctx = ctx;
4582 /* Everything's already locked, -EDEADLK can't happen. */
4583 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
4584 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
4586 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
4588 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
4590 drm_atomic_state_put(state);
4592 drm_dbg_kms(&dev_priv->drm,
4593 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
4594 crtc->base.base.id, crtc->base.name);
4596 crtc->active = false;
4597 crtc->base.enabled = false;
4599 drm_WARN_ON(&dev_priv->drm,
4600 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
4601 crtc_state->uapi.active = false;
4602 crtc_state->uapi.connector_mask = 0;
4603 crtc_state->uapi.encoder_mask = 0;
4604 intel_crtc_free_hw_state(crtc_state);
4605 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
4607 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
4608 encoder->base.crtc = NULL;
4610 intel_fbc_disable(crtc);
4611 intel_update_watermarks(crtc);
4612 intel_disable_shared_dpll(crtc_state);
4614 intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
4616 dev_priv->active_pipes &= ~BIT(pipe);
4617 cdclk_state->min_cdclk[pipe] = 0;
4618 cdclk_state->min_voltage_level[pipe] = 0;
4619 cdclk_state->active_pipes &= ~BIT(pipe);
4621 dbuf_state->active_pipes &= ~BIT(pipe);
4623 bw_state->data_rate[pipe] = 0;
4624 bw_state->num_active_planes[pipe] = 0;
4628 * turn all crtc's off, but do not adjust state
4629 * This has to be paired with a call to intel_modeset_setup_hw_state.
4631 int intel_display_suspend(struct drm_device *dev)
4633 struct drm_i915_private *dev_priv = to_i915(dev);
4634 struct drm_atomic_state *state;
4637 state = drm_atomic_helper_suspend(dev);
4638 ret = PTR_ERR_OR_ZERO(state);
4640 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4643 dev_priv->modeset_restore_state = state;
4647 void intel_encoder_destroy(struct drm_encoder *encoder)
4649 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4651 drm_encoder_cleanup(encoder);
4652 kfree(intel_encoder);
4655 /* Cross check the actual hw state with our own modeset state tracking (and it's
4656 * internal consistency). */
4657 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
4658 struct drm_connector_state *conn_state)
4660 struct intel_connector *connector = to_intel_connector(conn_state->connector);
4661 struct drm_i915_private *i915 = to_i915(connector->base.dev);
4663 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
4664 connector->base.base.id, connector->base.name);
4666 if (connector->get_hw_state(connector)) {
4667 struct intel_encoder *encoder = intel_attached_encoder(connector);
4669 I915_STATE_WARN(!crtc_state,
4670 "connector enabled without attached crtc\n");
4675 I915_STATE_WARN(!crtc_state->hw.active,
4676 "connector is active, but attached crtc isn't\n");
4678 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
4681 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
4682 "atomic encoder doesn't match attached encoder\n");
4684 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
4685 "attached encoder crtc differs from connector crtc\n");
4687 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
4688 "attached crtc is active, but connector isn't\n");
4689 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
4690 "best encoder set without crtc!\n");
4694 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4696 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4697 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4699 /* IPS only exists on ULT machines and is tied to pipe A. */
4700 if (!hsw_crtc_supports_ips(crtc))
4703 if (!dev_priv->params.enable_ips)
4706 if (crtc_state->pipe_bpp > 24)
4710 * We compare against max which means we must take
4711 * the increased cdclk requirement into account when
4712 * calculating the new cdclk.
4714 * Should measure whether using a lower cdclk w/o IPS
4716 if (IS_BROADWELL(dev_priv) &&
4717 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
4723 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
4725 struct drm_i915_private *dev_priv =
4726 to_i915(crtc_state->uapi.crtc->dev);
4727 struct intel_atomic_state *state =
4728 to_intel_atomic_state(crtc_state->uapi.state);
4730 crtc_state->ips_enabled = false;
4732 if (!hsw_crtc_state_ips_capable(crtc_state))
4736 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4737 * enabled and disabled dynamically based on package C states,
4738 * user space can't make reliable use of the CRCs, so let's just
4739 * completely disable it.
4741 if (crtc_state->crc_enabled)
4744 /* IPS should be fine as long as at least one plane is enabled. */
4745 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
4748 if (IS_BROADWELL(dev_priv)) {
4749 const struct intel_cdclk_state *cdclk_state;
4751 cdclk_state = intel_atomic_get_cdclk_state(state);
4752 if (IS_ERR(cdclk_state))
4753 return PTR_ERR(cdclk_state);
4755 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
4756 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
4760 crtc_state->ips_enabled = true;
4765 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4767 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4769 /* GDG double wide on either pipe, otherwise pipe A only */
4770 return INTEL_GEN(dev_priv) < 4 &&
4771 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
4774 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4776 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4777 unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
4780 * We only use IF-ID interlacing. If we ever use
4781 * PF-ID we'll need to adjust the pixel_rate here.
4784 if (!crtc_state->pch_pfit.enabled)
4787 pipe_w = crtc_state->pipe_src_w;
4788 pipe_h = crtc_state->pipe_src_h;
4790 pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
4791 pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
4793 if (pipe_w < pfit_w)
4795 if (pipe_h < pfit_h)
4798 if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
4799 !pfit_w || !pfit_h))
4802 return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
4806 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4807 const struct drm_display_mode *timings)
4809 mode->hdisplay = timings->crtc_hdisplay;
4810 mode->htotal = timings->crtc_htotal;
4811 mode->hsync_start = timings->crtc_hsync_start;
4812 mode->hsync_end = timings->crtc_hsync_end;
4814 mode->vdisplay = timings->crtc_vdisplay;
4815 mode->vtotal = timings->crtc_vtotal;
4816 mode->vsync_start = timings->crtc_vsync_start;
4817 mode->vsync_end = timings->crtc_vsync_end;
4819 mode->flags = timings->flags;
4820 mode->type = DRM_MODE_TYPE_DRIVER;
4822 mode->clock = timings->crtc_clock;
4824 drm_mode_set_name(mode);
4827 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4829 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4831 if (HAS_GMCH(dev_priv))
4832 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
4833 crtc_state->pixel_rate =
4834 crtc_state->hw.pipe_mode.crtc_clock;
4836 crtc_state->pixel_rate =
4837 ilk_pipe_pixel_rate(crtc_state);
4840 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4842 struct drm_display_mode *mode = &crtc_state->hw.mode;
4843 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4844 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4846 drm_mode_copy(pipe_mode, adjusted_mode);
4848 if (crtc_state->bigjoiner) {
4850 * transcoder is programmed to the full mode,
4851 * but pipe timings are half of the transcoder mode
4853 pipe_mode->crtc_hdisplay /= 2;
4854 pipe_mode->crtc_hblank_start /= 2;
4855 pipe_mode->crtc_hblank_end /= 2;
4856 pipe_mode->crtc_hsync_start /= 2;
4857 pipe_mode->crtc_hsync_end /= 2;
4858 pipe_mode->crtc_htotal /= 2;
4859 pipe_mode->crtc_clock /= 2;
4862 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4863 intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4865 intel_crtc_compute_pixel_rate(crtc_state);
4867 drm_mode_copy(mode, adjusted_mode);
4868 mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4869 mode->vdisplay = crtc_state->pipe_src_h;
4872 static void intel_encoder_get_config(struct intel_encoder *encoder,
4873 struct intel_crtc_state *crtc_state)
4875 encoder->get_config(encoder, crtc_state);
4877 intel_crtc_readout_derived_state(crtc_state);
4880 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4881 struct intel_crtc_state *pipe_config)
4883 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4884 struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4885 int clock_limit = dev_priv->max_dotclk_freq;
4887 drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4889 /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
4890 if (pipe_config->bigjoiner) {
4891 pipe_mode->crtc_clock /= 2;
4892 pipe_mode->crtc_hdisplay /= 2;
4893 pipe_mode->crtc_hblank_start /= 2;
4894 pipe_mode->crtc_hblank_end /= 2;
4895 pipe_mode->crtc_hsync_start /= 2;
4896 pipe_mode->crtc_hsync_end /= 2;
4897 pipe_mode->crtc_htotal /= 2;
4898 pipe_config->pipe_src_w /= 2;
4901 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4903 if (INTEL_GEN(dev_priv) < 4) {
4904 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4907 * Enable double wide mode when the dot clock
4908 * is > 90% of the (display) core speed.
4910 if (intel_crtc_supports_double_wide(crtc) &&
4911 pipe_mode->crtc_clock > clock_limit) {
4912 clock_limit = dev_priv->max_dotclk_freq;
4913 pipe_config->double_wide = true;
4917 if (pipe_mode->crtc_clock > clock_limit) {
4918 drm_dbg_kms(&dev_priv->drm,
4919 "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4920 pipe_mode->crtc_clock, clock_limit,
4921 yesno(pipe_config->double_wide));
4926 * Pipe horizontal size must be even in:
4928 * - LVDS dual channel mode
4929 * - Double wide pipe
4931 if (pipe_config->pipe_src_w & 1) {
4932 if (pipe_config->double_wide) {
4933 drm_dbg_kms(&dev_priv->drm,
4934 "Odd pipe source width not supported with double wide pipe\n");
4938 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4939 intel_is_dual_link_lvds(dev_priv)) {
4940 drm_dbg_kms(&dev_priv->drm,
4941 "Odd pipe source width not supported with dual link LVDS\n");
4946 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4947 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4949 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4950 pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4953 intel_crtc_compute_pixel_rate(pipe_config);
4955 if (pipe_config->has_pch_encoder)
4956 return ilk_fdi_compute_config(crtc, pipe_config);
4962 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4964 while (*num > DATA_LINK_M_N_MASK ||
4965 *den > DATA_LINK_M_N_MASK) {
4971 static void compute_m_n(unsigned int m, unsigned int n,
4972 u32 *ret_m, u32 *ret_n,
4976 * Several DP dongles in particular seem to be fussy about
4977 * too large link M/N values. Give N value as 0x8000 that
4978 * should be acceptable by specific devices. 0x8000 is the
4979 * specified fixed N value for asynchronous clock mode,
4980 * which the devices expect also in synchronous clock mode.
4983 *ret_n = DP_LINK_CONSTANT_N_VALUE;
4985 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4987 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4988 intel_reduce_m_n_ratio(ret_m, ret_n);
4992 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4993 int pixel_clock, int link_clock,
4994 struct intel_link_m_n *m_n,
4995 bool constant_n, bool fec_enable)
4997 u32 data_clock = bits_per_pixel * pixel_clock;
5000 data_clock = intel_dp_mode_to_fec_clock(data_clock);
5003 compute_m_n(data_clock,
5004 link_clock * nlanes * 8,
5005 &m_n->gmch_m, &m_n->gmch_n,
5008 compute_m_n(pixel_clock, link_clock,
5009 &m_n->link_m, &m_n->link_n,
5013 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
5016 * There may be no VBT; and if the BIOS enabled SSC we can
5017 * just keep using it to avoid unnecessary flicker. Whereas if the
5018 * BIOS isn't using it, don't assume it will work even if the VBT
5019 * indicates as much.
5021 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5022 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
5026 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
5027 drm_dbg_kms(&dev_priv->drm,
5028 "SSC %s by BIOS, overriding VBT which says %s\n",
5029 enableddisabled(bios_lvds_use_ssc),
5030 enableddisabled(dev_priv->vbt.lvds_use_ssc));
5031 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
5036 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
5037 const struct intel_link_m_n *m_n)
5039 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5040 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5041 enum pipe pipe = crtc->pipe;
5043 intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
5044 TU_SIZE(m_n->tu) | m_n->gmch_m);
5045 intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5046 intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5047 intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5050 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
5051 enum transcoder transcoder)
5053 if (IS_HASWELL(dev_priv))
5054 return transcoder == TRANSCODER_EDP;
5057 * Strictly speaking some registers are available before
5058 * gen7, but we only support DRRS on gen7+
5060 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
5063 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
5064 const struct intel_link_m_n *m_n,
5065 const struct intel_link_m_n *m2_n2)
5067 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5068 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5069 enum pipe pipe = crtc->pipe;
5070 enum transcoder transcoder = crtc_state->cpu_transcoder;
5072 if (INTEL_GEN(dev_priv) >= 5) {
5073 intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
5074 TU_SIZE(m_n->tu) | m_n->gmch_m);
5075 intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
5077 intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
5079 intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
5082 * M2_N2 registers are set only if DRRS is supported
5083 * (to make sure the registers are not unnecessarily accessed).
5085 if (m2_n2 && crtc_state->has_drrs &&
5086 transcoder_has_m2_n2(dev_priv, transcoder)) {
5087 intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
5088 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
5089 intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
5091 intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
5093 intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
5097 intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
5098 TU_SIZE(m_n->tu) | m_n->gmch_m);
5099 intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5100 intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
5101 intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
5105 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
5107 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
5108 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
5111 dp_m_n = &crtc_state->dp_m_n;
5112 dp_m2_n2 = &crtc_state->dp_m2_n2;
5113 } else if (m_n == M2_N2) {
5116 * M2_N2 registers are not supported. Hence m2_n2 divider value
5117 * needs to be programmed into M1_N1.
5119 dp_m_n = &crtc_state->dp_m2_n2;
5121 drm_err(&i915->drm, "Unsupported divider value\n");
5125 if (crtc_state->has_pch_encoder)
5126 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
5128 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
5131 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
5133 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5134 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5135 enum pipe pipe = crtc->pipe;
5136 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5137 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
5138 u32 crtc_vtotal, crtc_vblank_end;
5141 /* We need to be careful not to changed the adjusted mode, for otherwise
5142 * the hw state checker will get angry at the mismatch. */
5143 crtc_vtotal = adjusted_mode->crtc_vtotal;
5144 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5146 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5147 /* the chip adds 2 halflines automatically */
5149 crtc_vblank_end -= 1;
5151 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5152 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5154 vsyncshift = adjusted_mode->crtc_hsync_start -
5155 adjusted_mode->crtc_htotal / 2;
5157 vsyncshift += adjusted_mode->crtc_htotal;
5160 if (INTEL_GEN(dev_priv) > 3)
5161 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
5164 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
5165 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
5166 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
5167 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
5168 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
5169 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
5171 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
5172 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
5173 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
5174 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
5175 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
5176 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
5178 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5179 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5180 * documented on the DDI_FUNC_CTL register description, EDP Input Select
5182 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
5183 (pipe == PIPE_B || pipe == PIPE_C))
5184 intel_de_write(dev_priv, VTOTAL(pipe),
5185 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
5189 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
5191 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5192 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5193 enum pipe pipe = crtc->pipe;
5195 /* pipesrc controls the size that is scaled from, which should
5196 * always be the user's requested size.
5198 intel_de_write(dev_priv, PIPESRC(pipe),
5199 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
5202 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
5204 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5205 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5207 if (IS_GEN(dev_priv, 2))
5210 if (INTEL_GEN(dev_priv) >= 9 ||
5211 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
5212 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
5214 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
5217 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
5218 struct intel_crtc_state *pipe_config)
5220 struct drm_device *dev = crtc->base.dev;
5221 struct drm_i915_private *dev_priv = to_i915(dev);
5222 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5225 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
5226 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5227 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5229 if (!transcoder_is_dsi(cpu_transcoder)) {
5230 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
5231 pipe_config->hw.adjusted_mode.crtc_hblank_start =
5233 pipe_config->hw.adjusted_mode.crtc_hblank_end =
5234 ((tmp >> 16) & 0xffff) + 1;
5236 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
5237 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5238 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5240 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
5241 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5242 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5244 if (!transcoder_is_dsi(cpu_transcoder)) {
5245 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
5246 pipe_config->hw.adjusted_mode.crtc_vblank_start =
5248 pipe_config->hw.adjusted_mode.crtc_vblank_end =
5249 ((tmp >> 16) & 0xffff) + 1;
5251 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
5252 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5253 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5255 if (intel_pipe_is_interlaced(pipe_config)) {
5256 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5257 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
5258 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
5262 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
5263 struct intel_crtc_state *pipe_config)
5265 struct drm_device *dev = crtc->base.dev;
5266 struct drm_i915_private *dev_priv = to_i915(dev);
5269 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
5270 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5271 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5274 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
5276 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5277 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5282 /* we keep both pipes enabled on 830 */
5283 if (IS_I830(dev_priv))
5284 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
5286 if (crtc_state->double_wide)
5287 pipeconf |= PIPECONF_DOUBLE_WIDE;
5289 /* only g4x and later have fancy bpc/dither controls */
5290 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5291 IS_CHERRYVIEW(dev_priv)) {
5292 /* Bspec claims that we can't use dithering for 30bpp pipes. */
5293 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
5294 pipeconf |= PIPECONF_DITHER_EN |
5295 PIPECONF_DITHER_TYPE_SP;
5297 switch (crtc_state->pipe_bpp) {
5299 pipeconf |= PIPECONF_6BPC;
5302 pipeconf |= PIPECONF_8BPC;
5305 pipeconf |= PIPECONF_10BPC;
5308 /* Case prevented by intel_choose_pipe_bpp_dither. */
5313 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
5314 if (INTEL_GEN(dev_priv) < 4 ||
5315 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5316 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5318 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
5320 pipeconf |= PIPECONF_PROGRESSIVE;
5323 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5324 crtc_state->limited_color_range)
5325 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5327 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5329 pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5331 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
5332 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
5335 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
5337 if (IS_I830(dev_priv))
5340 return INTEL_GEN(dev_priv) >= 4 ||
5341 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
5344 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
5346 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5347 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5350 if (!i9xx_has_pfit(dev_priv))
5353 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
5354 if (!(tmp & PFIT_ENABLE))
5357 /* Check whether the pfit is attached to our pipe. */
5358 if (INTEL_GEN(dev_priv) < 4) {
5359 if (crtc->pipe != PIPE_B)
5362 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5366 crtc_state->gmch_pfit.control = tmp;
5367 crtc_state->gmch_pfit.pgm_ratios =
5368 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
5371 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5372 struct intel_crtc_state *pipe_config)
5374 struct drm_device *dev = crtc->base.dev;
5375 struct drm_i915_private *dev_priv = to_i915(dev);
5376 enum pipe pipe = crtc->pipe;
5379 int refclk = 100000;
5381 /* In case of DSI, DPLL will not be used */
5382 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5385 vlv_dpio_get(dev_priv);
5386 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
5387 vlv_dpio_put(dev_priv);
5389 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5390 clock.m2 = mdiv & DPIO_M2DIV_MASK;
5391 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5392 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5393 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5395 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
5398 static void chv_crtc_clock_get(struct intel_crtc *crtc,
5399 struct intel_crtc_state *pipe_config)
5401 struct drm_device *dev = crtc->base.dev;
5402 struct drm_i915_private *dev_priv = to_i915(dev);
5403 enum pipe pipe = crtc->pipe;
5404 enum dpio_channel port = vlv_pipe_to_channel(pipe);
5406 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
5407 int refclk = 100000;
5409 /* In case of DSI, DPLL will not be used */
5410 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5413 vlv_dpio_get(dev_priv);
5414 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
5415 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
5416 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
5417 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
5418 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
5419 vlv_dpio_put(dev_priv);
5421 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
5422 clock.m2 = (pll_dw0 & 0xff) << 22;
5423 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
5424 clock.m2 |= pll_dw2 & 0x3fffff;
5425 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
5426 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
5427 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
5429 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
5432 static enum intel_output_format
5433 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
5435 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5438 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5440 if (tmp & PIPEMISC_YUV420_ENABLE) {
5441 /* We support 4:2:0 in full blend mode only */
5442 drm_WARN_ON(&dev_priv->drm,
5443 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
5445 return INTEL_OUTPUT_FORMAT_YCBCR420;
5446 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
5447 return INTEL_OUTPUT_FORMAT_YCBCR444;
5449 return INTEL_OUTPUT_FORMAT_RGB;
5453 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
5455 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5456 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5457 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5458 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
5461 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
5463 if (tmp & DISPPLANE_GAMMA_ENABLE)
5464 crtc_state->gamma_enable = true;
5466 if (!HAS_GMCH(dev_priv) &&
5467 tmp & DISPPLANE_PIPE_CSC_ENABLE)
5468 crtc_state->csc_enable = true;
5471 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5472 struct intel_crtc_state *pipe_config)
5474 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5475 enum intel_display_power_domain power_domain;
5476 intel_wakeref_t wakeref;
5480 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5481 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5485 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5486 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5487 pipe_config->shared_dpll = NULL;
5491 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5492 if (!(tmp & PIPECONF_ENABLE))
5495 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5496 IS_CHERRYVIEW(dev_priv)) {
5497 switch (tmp & PIPECONF_BPC_MASK) {
5499 pipe_config->pipe_bpp = 18;
5502 pipe_config->pipe_bpp = 24;
5504 case PIPECONF_10BPC:
5505 pipe_config->pipe_bpp = 30;
5512 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5513 (tmp & PIPECONF_COLOR_RANGE_SELECT))
5514 pipe_config->limited_color_range = true;
5516 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
5517 PIPECONF_GAMMA_MODE_SHIFT;
5519 if (IS_CHERRYVIEW(dev_priv))
5520 pipe_config->cgm_mode = intel_de_read(dev_priv,
5521 CGM_PIPE_MODE(crtc->pipe));
5523 i9xx_get_pipe_color_config(pipe_config);
5524 intel_color_get_config(pipe_config);
5526 if (INTEL_GEN(dev_priv) < 4)
5527 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5529 intel_get_transcoder_timings(crtc, pipe_config);
5530 intel_get_pipe_src_size(crtc, pipe_config);
5532 i9xx_get_pfit_config(pipe_config);
5534 if (INTEL_GEN(dev_priv) >= 4) {
5535 /* No way to read it out on pipes B and C */
5536 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
5537 tmp = dev_priv->chv_dpll_md[crtc->pipe];
5539 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
5540 pipe_config->pixel_multiplier =
5541 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5542 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5543 pipe_config->dpll_hw_state.dpll_md = tmp;
5544 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5545 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
5546 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
5547 pipe_config->pixel_multiplier =
5548 ((tmp & SDVO_MULTIPLIER_MASK)
5549 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5551 /* Note that on i915G/GM the pixel multiplier is in the sdvo
5552 * port and will be fixed up in the encoder->get_config
5554 pipe_config->pixel_multiplier = 1;
5556 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
5558 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
5559 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
5561 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
5564 /* Mask out read-only status bits. */
5565 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5566 DPLL_PORTC_READY_MASK |
5567 DPLL_PORTB_READY_MASK);
5570 if (IS_CHERRYVIEW(dev_priv))
5571 chv_crtc_clock_get(crtc, pipe_config);
5572 else if (IS_VALLEYVIEW(dev_priv))
5573 vlv_crtc_clock_get(crtc, pipe_config);
5575 i9xx_crtc_clock_get(crtc, pipe_config);
5578 * Normally the dotclock is filled in by the encoder .get_config()
5579 * but in case the pipe is enabled w/o any ports we need a sane
5582 pipe_config->hw.adjusted_mode.crtc_clock =
5583 pipe_config->port_clock / pipe_config->pixel_multiplier;
5588 intel_display_power_put(dev_priv, power_domain, wakeref);
5593 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
5595 struct intel_encoder *encoder;
5598 bool has_lvds = false;
5599 bool has_cpu_edp = false;
5600 bool has_panel = false;
5601 bool has_ck505 = false;
5602 bool can_ssc = false;
5603 bool using_ssc_source = false;
5605 /* We need to take the global config into account */
5606 for_each_intel_encoder(&dev_priv->drm, encoder) {
5607 switch (encoder->type) {
5608 case INTEL_OUTPUT_LVDS:
5612 case INTEL_OUTPUT_EDP:
5614 if (encoder->port == PORT_A)
5622 if (HAS_PCH_IBX(dev_priv)) {
5623 has_ck505 = dev_priv->vbt.display_clock_mode;
5624 can_ssc = has_ck505;
5630 /* Check if any DPLLs are using the SSC source */
5631 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
5632 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
5634 if (!(temp & DPLL_VCO_ENABLE))
5637 if ((temp & PLL_REF_INPUT_MASK) ==
5638 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5639 using_ssc_source = true;
5644 drm_dbg_kms(&dev_priv->drm,
5645 "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
5646 has_panel, has_lvds, has_ck505, using_ssc_source);
5648 /* Ironlake: try to setup display ref clock before DPLL
5649 * enabling. This is only under driver's control after
5650 * PCH B stepping, previous chipset stepping should be
5651 * ignoring this setting.
5653 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
5655 /* As we must carefully and slowly disable/enable each source in turn,
5656 * compute the final state we want first and check if we need to
5657 * make any changes at all.
5660 final &= ~DREF_NONSPREAD_SOURCE_MASK;
5662 final |= DREF_NONSPREAD_CK505_ENABLE;
5664 final |= DREF_NONSPREAD_SOURCE_ENABLE;
5666 final &= ~DREF_SSC_SOURCE_MASK;
5667 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5668 final &= ~DREF_SSC1_ENABLE;
5671 final |= DREF_SSC_SOURCE_ENABLE;
5673 if (intel_panel_use_ssc(dev_priv) && can_ssc)
5674 final |= DREF_SSC1_ENABLE;
5677 if (intel_panel_use_ssc(dev_priv) && can_ssc)
5678 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5680 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5682 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5683 } else if (using_ssc_source) {
5684 final |= DREF_SSC_SOURCE_ENABLE;
5685 final |= DREF_SSC1_ENABLE;
5691 /* Always enable nonspread source */
5692 val &= ~DREF_NONSPREAD_SOURCE_MASK;
5695 val |= DREF_NONSPREAD_CK505_ENABLE;
5697 val |= DREF_NONSPREAD_SOURCE_ENABLE;
5700 val &= ~DREF_SSC_SOURCE_MASK;
5701 val |= DREF_SSC_SOURCE_ENABLE;
5703 /* SSC must be turned on before enabling the CPU output */
5704 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5705 drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
5706 val |= DREF_SSC1_ENABLE;
5708 val &= ~DREF_SSC1_ENABLE;
5710 /* Get SSC going before enabling the outputs */
5711 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5712 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5715 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5717 /* Enable CPU source on CPU attached eDP */
5719 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5720 drm_dbg_kms(&dev_priv->drm,
5721 "Using SSC on eDP\n");
5722 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5724 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5726 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5728 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5729 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5732 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
5734 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5736 /* Turn off CPU output */
5737 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5739 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5740 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5743 if (!using_ssc_source) {
5744 drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
5746 /* Turn off the SSC source */
5747 val &= ~DREF_SSC_SOURCE_MASK;
5748 val |= DREF_SSC_SOURCE_DISABLE;
5751 val &= ~DREF_SSC1_ENABLE;
5753 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5754 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5759 BUG_ON(val != final);
5762 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5766 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5767 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5768 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5770 if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5771 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5772 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5774 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5775 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5776 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5778 if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5779 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5780 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5783 /* WaMPhyProgramming:hsw */
5784 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5788 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5789 tmp &= ~(0xFF << 24);
5790 tmp |= (0x12 << 24);
5791 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5793 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5795 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5797 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5799 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5801 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5802 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5803 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5805 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5806 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5807 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5809 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5812 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5814 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5817 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5819 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5822 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5824 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5827 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5829 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5830 tmp &= ~(0xFF << 16);
5831 tmp |= (0x1C << 16);
5832 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5834 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5835 tmp &= ~(0xFF << 16);
5836 tmp |= (0x1C << 16);
5837 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5839 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5841 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5843 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5845 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5847 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5848 tmp &= ~(0xF << 28);
5850 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5852 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5853 tmp &= ~(0xF << 28);
5855 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5858 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5859 * Programming" based on the parameters passed:
5860 * - Sequence to enable CLKOUT_DP
5861 * - Sequence to enable CLKOUT_DP without spread
5862 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5864 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5865 bool with_spread, bool with_fdi)
5869 if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5870 "FDI requires downspread\n"))
5872 if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5873 with_fdi, "LP PCH doesn't have FDI\n"))
5876 mutex_lock(&dev_priv->sb_lock);
5878 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5879 tmp &= ~SBI_SSCCTL_DISABLE;
5880 tmp |= SBI_SSCCTL_PATHALT;
5881 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5886 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5887 tmp &= ~SBI_SSCCTL_PATHALT;
5888 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5891 lpt_reset_fdi_mphy(dev_priv);
5892 lpt_program_fdi_mphy(dev_priv);
5896 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5897 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5898 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5899 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5901 mutex_unlock(&dev_priv->sb_lock);
5904 /* Sequence to disable CLKOUT_DP */
5905 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5909 mutex_lock(&dev_priv->sb_lock);
5911 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5912 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5913 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5914 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5916 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5917 if (!(tmp & SBI_SSCCTL_DISABLE)) {
5918 if (!(tmp & SBI_SSCCTL_PATHALT)) {
5919 tmp |= SBI_SSCCTL_PATHALT;
5920 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5923 tmp |= SBI_SSCCTL_DISABLE;
5924 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5927 mutex_unlock(&dev_priv->sb_lock);
5930 #define BEND_IDX(steps) ((50 + (steps)) / 5)
5932 static const u16 sscdivintphase[] = {
5933 [BEND_IDX( 50)] = 0x3B23,
5934 [BEND_IDX( 45)] = 0x3B23,
5935 [BEND_IDX( 40)] = 0x3C23,
5936 [BEND_IDX( 35)] = 0x3C23,
5937 [BEND_IDX( 30)] = 0x3D23,
5938 [BEND_IDX( 25)] = 0x3D23,
5939 [BEND_IDX( 20)] = 0x3E23,
5940 [BEND_IDX( 15)] = 0x3E23,
5941 [BEND_IDX( 10)] = 0x3F23,
5942 [BEND_IDX( 5)] = 0x3F23,
5943 [BEND_IDX( 0)] = 0x0025,
5944 [BEND_IDX( -5)] = 0x0025,
5945 [BEND_IDX(-10)] = 0x0125,
5946 [BEND_IDX(-15)] = 0x0125,
5947 [BEND_IDX(-20)] = 0x0225,
5948 [BEND_IDX(-25)] = 0x0225,
5949 [BEND_IDX(-30)] = 0x0325,
5950 [BEND_IDX(-35)] = 0x0325,
5951 [BEND_IDX(-40)] = 0x0425,
5952 [BEND_IDX(-45)] = 0x0425,
5953 [BEND_IDX(-50)] = 0x0525,
5958 * steps -50 to 50 inclusive, in steps of 5
5959 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5960 * change in clock period = -(steps / 10) * 5.787 ps
5962 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5965 int idx = BEND_IDX(steps);
5967 if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5970 if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5973 mutex_lock(&dev_priv->sb_lock);
5975 if (steps % 10 != 0)
5979 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5981 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5983 tmp |= sscdivintphase[idx];
5984 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5986 mutex_unlock(&dev_priv->sb_lock);
5991 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5993 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5994 u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5996 if ((ctl & SPLL_PLL_ENABLE) == 0)
5999 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
6000 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
6003 if (IS_BROADWELL(dev_priv) &&
6004 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
6010 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
6011 enum intel_dpll_id id)
6013 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
6014 u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
6016 if ((ctl & WRPLL_PLL_ENABLE) == 0)
6019 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
6022 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
6023 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
6024 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
6030 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
6032 struct intel_encoder *encoder;
6033 bool has_fdi = false;
6035 for_each_intel_encoder(&dev_priv->drm, encoder) {
6036 switch (encoder->type) {
6037 case INTEL_OUTPUT_ANALOG:
6046 * The BIOS may have decided to use the PCH SSC
6047 * reference so we must not disable it until the
6048 * relevant PLLs have stopped relying on it. We'll
6049 * just leave the PCH SSC reference enabled in case
6050 * any active PLL is using it. It will get disabled
6051 * after runtime suspend if we don't have FDI.
6053 * TODO: Move the whole reference clock handling
6054 * to the modeset sequence proper so that we can
6055 * actually enable/disable/reconfigure these things
6056 * safely. To do that we need to introduce a real
6057 * clock hierarchy. That would also allow us to do
6058 * clock bending finally.
6060 dev_priv->pch_ssc_use = 0;
6062 if (spll_uses_pch_ssc(dev_priv)) {
6063 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
6064 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
6067 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
6068 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
6069 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
6072 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
6073 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
6074 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
6077 if (dev_priv->pch_ssc_use)
6081 lpt_bend_clkout_dp(dev_priv, 0);
6082 lpt_enable_clkout_dp(dev_priv, true, true);
6084 lpt_disable_clkout_dp(dev_priv);
6089 * Initialize reference clocks when the driver loads
6091 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
6093 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
6094 ilk_init_pch_refclk(dev_priv);
6095 else if (HAS_PCH_LPT(dev_priv))
6096 lpt_init_pch_refclk(dev_priv);
6099 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
6101 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6102 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6103 enum pipe pipe = crtc->pipe;
6108 switch (crtc_state->pipe_bpp) {
6110 val |= PIPECONF_6BPC;
6113 val |= PIPECONF_8BPC;
6116 val |= PIPECONF_10BPC;
6119 val |= PIPECONF_12BPC;
6122 /* Case prevented by intel_choose_pipe_bpp_dither. */
6126 if (crtc_state->dither)
6127 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6129 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6130 val |= PIPECONF_INTERLACED_ILK;
6132 val |= PIPECONF_PROGRESSIVE;
6135 * This would end up with an odd purple hue over
6136 * the entire display. Make sure we don't do it.
6138 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
6139 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
6141 if (crtc_state->limited_color_range &&
6142 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
6143 val |= PIPECONF_COLOR_RANGE_SELECT;
6145 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
6146 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
6148 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
6150 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
6152 intel_de_write(dev_priv, PIPECONF(pipe), val);
6153 intel_de_posting_read(dev_priv, PIPECONF(pipe));
6156 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
6158 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6159 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6160 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
6163 if (IS_HASWELL(dev_priv) && crtc_state->dither)
6164 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6166 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6167 val |= PIPECONF_INTERLACED_ILK;
6169 val |= PIPECONF_PROGRESSIVE;
6171 if (IS_HASWELL(dev_priv) &&
6172 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
6173 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
6175 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
6176 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
6179 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
6181 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6182 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6185 switch (crtc_state->pipe_bpp) {
6187 val |= PIPEMISC_DITHER_6_BPC;
6190 val |= PIPEMISC_DITHER_8_BPC;
6193 val |= PIPEMISC_DITHER_10_BPC;
6196 val |= PIPEMISC_DITHER_12_BPC;
6199 MISSING_CASE(crtc_state->pipe_bpp);
6203 if (crtc_state->dither)
6204 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6206 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6207 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
6208 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
6210 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
6211 val |= PIPEMISC_YUV420_ENABLE |
6212 PIPEMISC_YUV420_MODE_FULL_BLEND;
6214 if (INTEL_GEN(dev_priv) >= 11 &&
6215 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
6216 BIT(PLANE_CURSOR))) == 0)
6217 val |= PIPEMISC_HDR_MODE_PRECISION;
6219 if (INTEL_GEN(dev_priv) >= 12)
6220 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
6222 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
6225 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
6227 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6230 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
6232 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
6233 case PIPEMISC_DITHER_6_BPC:
6235 case PIPEMISC_DITHER_8_BPC:
6237 case PIPEMISC_DITHER_10_BPC:
6239 case PIPEMISC_DITHER_12_BPC:
6247 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
6250 * Account for spread spectrum to avoid
6251 * oversubscribing the link. Max center spread
6252 * is 2.5%; use 5% for safety's sake.
6254 u32 bps = target_clock * bpp * 21 / 20;
6255 return DIV_ROUND_UP(bps, link_bw * 8);
6258 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
6259 struct intel_link_m_n *m_n)
6261 struct drm_device *dev = crtc->base.dev;
6262 struct drm_i915_private *dev_priv = to_i915(dev);
6263 enum pipe pipe = crtc->pipe;
6265 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
6266 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
6267 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
6269 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
6270 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
6271 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6274 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
6275 enum transcoder transcoder,
6276 struct intel_link_m_n *m_n,
6277 struct intel_link_m_n *m2_n2)
6279 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6280 enum pipe pipe = crtc->pipe;
6282 if (INTEL_GEN(dev_priv) >= 5) {
6283 m_n->link_m = intel_de_read(dev_priv,
6284 PIPE_LINK_M1(transcoder));
6285 m_n->link_n = intel_de_read(dev_priv,
6286 PIPE_LINK_N1(transcoder));
6287 m_n->gmch_m = intel_de_read(dev_priv,
6288 PIPE_DATA_M1(transcoder))
6290 m_n->gmch_n = intel_de_read(dev_priv,
6291 PIPE_DATA_N1(transcoder));
6292 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
6293 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6295 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
6296 m2_n2->link_m = intel_de_read(dev_priv,
6297 PIPE_LINK_M2(transcoder));
6298 m2_n2->link_n = intel_de_read(dev_priv,
6299 PIPE_LINK_N2(transcoder));
6300 m2_n2->gmch_m = intel_de_read(dev_priv,
6301 PIPE_DATA_M2(transcoder))
6303 m2_n2->gmch_n = intel_de_read(dev_priv,
6304 PIPE_DATA_N2(transcoder));
6305 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
6306 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6309 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
6310 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
6311 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
6313 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
6314 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
6315 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6319 void intel_dp_get_m_n(struct intel_crtc *crtc,
6320 struct intel_crtc_state *pipe_config)
6322 if (pipe_config->has_pch_encoder)
6323 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6325 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6326 &pipe_config->dp_m_n,
6327 &pipe_config->dp_m2_n2);
6330 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
6331 struct intel_crtc_state *pipe_config)
6333 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6334 &pipe_config->fdi_m_n, NULL);
6337 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
6340 drm_rect_init(&crtc_state->pch_pfit.dst,
6341 pos >> 16, pos & 0xffff,
6342 size >> 16, size & 0xffff);
6345 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
6347 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6348 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6349 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
6353 /* find scaler attached to this pipe */
6354 for (i = 0; i < crtc->num_scalers; i++) {
6357 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
6358 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
6362 crtc_state->pch_pfit.enabled = true;
6364 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
6365 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
6367 ilk_get_pfit_pos_size(crtc_state, pos, size);
6369 scaler_state->scalers[i].in_use = true;
6373 scaler_state->scaler_id = id;
6375 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
6377 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
6380 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
6382 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6383 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6386 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
6387 if ((ctl & PF_ENABLE) == 0)
6390 crtc_state->pch_pfit.enabled = true;
6392 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
6393 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
6395 ilk_get_pfit_pos_size(crtc_state, pos, size);
6398 * We currently do not free assignements of panel fitters on
6399 * ivb/hsw (since we don't use the higher upscaling modes which
6400 * differentiates them) so just WARN about this case for now.
6402 drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
6403 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
6406 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
6407 struct intel_crtc_state *pipe_config)
6409 struct drm_device *dev = crtc->base.dev;
6410 struct drm_i915_private *dev_priv = to_i915(dev);
6411 enum intel_display_power_domain power_domain;
6412 intel_wakeref_t wakeref;
6416 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
6417 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
6421 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6422 pipe_config->shared_dpll = NULL;
6425 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
6426 if (!(tmp & PIPECONF_ENABLE))
6429 switch (tmp & PIPECONF_BPC_MASK) {
6431 pipe_config->pipe_bpp = 18;
6434 pipe_config->pipe_bpp = 24;
6436 case PIPECONF_10BPC:
6437 pipe_config->pipe_bpp = 30;
6439 case PIPECONF_12BPC:
6440 pipe_config->pipe_bpp = 36;
6446 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
6447 pipe_config->limited_color_range = true;
6449 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
6450 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
6451 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
6452 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6455 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6459 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
6460 PIPECONF_GAMMA_MODE_SHIFT;
6462 pipe_config->csc_mode = intel_de_read(dev_priv,
6463 PIPE_CSC_MODE(crtc->pipe));
6465 i9xx_get_pipe_color_config(pipe_config);
6466 intel_color_get_config(pipe_config);
6468 if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6469 struct intel_shared_dpll *pll;
6470 enum intel_dpll_id pll_id;
6473 pipe_config->has_pch_encoder = true;
6475 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
6476 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6477 FDI_DP_PORT_WIDTH_SHIFT) + 1;
6479 ilk_get_fdi_m_n_config(crtc, pipe_config);
6481 if (HAS_PCH_IBX(dev_priv)) {
6483 * The pipe->pch transcoder and pch transcoder->pll
6486 pll_id = (enum intel_dpll_id) crtc->pipe;
6488 tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
6489 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6490 pll_id = DPLL_ID_PCH_PLL_B;
6492 pll_id= DPLL_ID_PCH_PLL_A;
6495 pipe_config->shared_dpll =
6496 intel_get_shared_dpll_by_id(dev_priv, pll_id);
6497 pll = pipe_config->shared_dpll;
6499 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6500 &pipe_config->dpll_hw_state);
6501 drm_WARN_ON(dev, !pll_active);
6503 tmp = pipe_config->dpll_hw_state.dpll;
6504 pipe_config->pixel_multiplier =
6505 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6506 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6508 ilk_pch_clock_get(crtc, pipe_config);
6510 pipe_config->pixel_multiplier = 1;
6513 intel_get_transcoder_timings(crtc, pipe_config);
6514 intel_get_pipe_src_size(crtc, pipe_config);
6516 ilk_get_pfit_config(pipe_config);
6521 intel_display_power_put(dev_priv, power_domain, wakeref);
6526 static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
6527 struct intel_crtc_state *pipe_config)
6529 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
6530 enum phy phy = intel_port_to_phy(dev_priv, port);
6531 struct icl_port_dpll *port_dpll;
6532 struct intel_shared_dpll *pll;
6533 enum intel_dpll_id id;
6537 clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
6538 id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);
6540 if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
6543 pll = intel_get_shared_dpll_by_id(dev_priv, id);
6544 port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
6546 port_dpll->pll = pll;
6547 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6548 &port_dpll->hw_state);
6549 drm_WARN_ON(&dev_priv->drm, !pll_active);
6551 icl_set_active_port_dpll(pipe_config, port_dpll_id);
6554 static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
6555 struct intel_crtc_state *pipe_config)
6557 enum phy phy = intel_port_to_phy(dev_priv, port);
6558 enum icl_port_dpll_id port_dpll_id;
6559 struct icl_port_dpll *port_dpll;
6560 struct intel_shared_dpll *pll;
6561 enum intel_dpll_id id;
6566 if (intel_phy_is_combo(dev_priv, phy)) {
6569 if (IS_ALDERLAKE_S(dev_priv)) {
6570 reg = ADLS_DPCLKA_CFGCR(phy);
6571 mask = ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy);
6572 shift = ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy);
6573 } else if (IS_ROCKETLAKE(dev_priv)) {
6574 reg = ICL_DPCLKA_CFGCR0;
6575 mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
6576 shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
6578 reg = ICL_DPCLKA_CFGCR0;
6579 mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
6580 shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
6583 temp = intel_de_read(dev_priv, reg) & mask;
6585 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
6586 } else if (intel_phy_is_tc(dev_priv, phy)) {
6587 u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
6589 if (clk_sel == DDI_CLK_SEL_MG) {
6590 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
6592 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
6594 drm_WARN_ON(&dev_priv->drm,
6595 clk_sel < DDI_CLK_SEL_TBT_162);
6596 id = DPLL_ID_ICL_TBTPLL;
6597 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
6600 drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
6604 pll = intel_get_shared_dpll_by_id(dev_priv, id);
6605 port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
6607 port_dpll->pll = pll;
6608 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6609 &port_dpll->hw_state);
6610 drm_WARN_ON(&dev_priv->drm, !pll_active);
6612 icl_set_active_port_dpll(pipe_config, port_dpll_id);
6615 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
6616 struct intel_crtc_state *pipe_config)
6618 struct intel_shared_dpll *pll;
6619 enum intel_dpll_id id;
6623 temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
6624 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
6626 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
6629 pll = intel_get_shared_dpll_by_id(dev_priv, id);
6631 pipe_config->shared_dpll = pll;
6632 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6633 &pipe_config->dpll_hw_state);
6634 drm_WARN_ON(&dev_priv->drm, !pll_active);
6637 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
6639 struct intel_crtc_state *pipe_config)
6641 struct intel_shared_dpll *pll;
6642 enum intel_dpll_id id;
6647 id = DPLL_ID_SKL_DPLL0;
6650 id = DPLL_ID_SKL_DPLL1;
6653 id = DPLL_ID_SKL_DPLL2;
6656 drm_err(&dev_priv->drm, "Incorrect port type\n");
6660 pll = intel_get_shared_dpll_by_id(dev_priv, id);
6662 pipe_config->shared_dpll = pll;
6663 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6664 &pipe_config->dpll_hw_state);
6665 drm_WARN_ON(&dev_priv->drm, !pll_active);
6668 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
6669 struct intel_crtc_state *pipe_config)
6671 struct intel_shared_dpll *pll;
6672 enum intel_dpll_id id;
6676 temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
6677 id = temp >> (port * 3 + 1);
6679 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
6682 pll = intel_get_shared_dpll_by_id(dev_priv, id);
6684 pipe_config->shared_dpll = pll;
6685 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6686 &pipe_config->dpll_hw_state);
6687 drm_WARN_ON(&dev_priv->drm, !pll_active);
6690 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
6691 struct intel_crtc_state *pipe_config)
6693 struct intel_shared_dpll *pll;
6694 enum intel_dpll_id id;
6695 u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
6698 switch (ddi_pll_sel) {
6699 case PORT_CLK_SEL_WRPLL1:
6700 id = DPLL_ID_WRPLL1;
6702 case PORT_CLK_SEL_WRPLL2:
6703 id = DPLL_ID_WRPLL2;
6705 case PORT_CLK_SEL_SPLL:
6708 case PORT_CLK_SEL_LCPLL_810:
6709 id = DPLL_ID_LCPLL_810;
6711 case PORT_CLK_SEL_LCPLL_1350:
6712 id = DPLL_ID_LCPLL_1350;
6714 case PORT_CLK_SEL_LCPLL_2700:
6715 id = DPLL_ID_LCPLL_2700;
6718 MISSING_CASE(ddi_pll_sel);
6720 case PORT_CLK_SEL_NONE:
6724 pll = intel_get_shared_dpll_by_id(dev_priv, id);
6726 pipe_config->shared_dpll = pll;
6727 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6728 &pipe_config->dpll_hw_state);
6729 drm_WARN_ON(&dev_priv->drm, !pll_active);
6732 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
6733 struct intel_crtc_state *pipe_config,
6734 struct intel_display_power_domain_set *power_domain_set)
6736 struct drm_device *dev = crtc->base.dev;
6737 struct drm_i915_private *dev_priv = to_i915(dev);
6738 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
6739 unsigned long enabled_panel_transcoders = 0;
6740 enum transcoder panel_transcoder;
6743 if (INTEL_GEN(dev_priv) >= 11)
6744 panel_transcoder_mask |=
6745 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
6748 * The pipe->transcoder mapping is fixed with the exception of the eDP
6749 * and DSI transcoders handled below.
6751 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6754 * XXX: Do intel_display_power_get_if_enabled before reading this (for
6755 * consistency and less surprising code; it's in always on power).
6757 for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
6758 panel_transcoder_mask) {
6759 bool force_thru = false;
6760 enum pipe trans_pipe;
6762 tmp = intel_de_read(dev_priv,
6763 TRANS_DDI_FUNC_CTL(panel_transcoder));
6764 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6768 * Log all enabled ones, only use the first one.
6770 * FIXME: This won't work for two separate DSI displays.
6772 enabled_panel_transcoders |= BIT(panel_transcoder);
6773 if (enabled_panel_transcoders != BIT(panel_transcoder))
6776 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6779 "unknown pipe linked to transcoder %s\n",
6780 transcoder_name(panel_transcoder));
6782 case TRANS_DDI_EDP_INPUT_A_ONOFF:
6785 case TRANS_DDI_EDP_INPUT_A_ON:
6786 trans_pipe = PIPE_A;
6788 case TRANS_DDI_EDP_INPUT_B_ONOFF:
6789 trans_pipe = PIPE_B;
6791 case TRANS_DDI_EDP_INPUT_C_ONOFF:
6792 trans_pipe = PIPE_C;
6794 case TRANS_DDI_EDP_INPUT_D_ONOFF:
6795 trans_pipe = PIPE_D;
6799 if (trans_pipe == crtc->pipe) {
6800 pipe_config->cpu_transcoder = panel_transcoder;
6801 pipe_config->pch_pfit.force_thru = force_thru;
6806 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
6808 drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
6809 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
6811 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6812 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6815 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
6817 return tmp & PIPECONF_ENABLE;
6820 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
6821 struct intel_crtc_state *pipe_config,
6822 struct intel_display_power_domain_set *power_domain_set)
6824 struct drm_device *dev = crtc->base.dev;
6825 struct drm_i915_private *dev_priv = to_i915(dev);
6826 enum transcoder cpu_transcoder;
6830 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
6832 cpu_transcoder = TRANSCODER_DSI_A;
6834 cpu_transcoder = TRANSCODER_DSI_C;
6836 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6837 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
6841 * The PLL needs to be enabled with a valid divider
6842 * configuration, otherwise accessing DSI registers will hang
6843 * the machine. See BSpec North Display Engine
6844 * registers/MIPI[BXT]. We can break out here early, since we
6845 * need the same DSI PLL to be enabled for both DSI ports.
6847 if (!bxt_dsi_pll_is_enabled(dev_priv))
6850 /* XXX: this works for video mode only */
6851 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
6852 if (!(tmp & DPI_ENABLE))
6855 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
6856 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
6859 pipe_config->cpu_transcoder = cpu_transcoder;
6863 return transcoder_is_dsi(pipe_config->cpu_transcoder);
6866 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
6867 struct intel_crtc_state *pipe_config)
6869 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6870 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6874 if (transcoder_is_dsi(cpu_transcoder)) {
6875 port = (cpu_transcoder == TRANSCODER_DSI_A) ?
6878 tmp = intel_de_read(dev_priv,
6879 TRANS_DDI_FUNC_CTL(cpu_transcoder));
6880 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6882 if (INTEL_GEN(dev_priv) >= 12)
6883 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6885 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6888 if (IS_DG1(dev_priv))
6889 dg1_get_ddi_pll(dev_priv, port, pipe_config);
6890 else if (INTEL_GEN(dev_priv) >= 11)
6891 icl_get_ddi_pll(dev_priv, port, pipe_config);
6892 else if (IS_CANNONLAKE(dev_priv))
6893 cnl_get_ddi_pll(dev_priv, port, pipe_config);
6894 else if (IS_GEN9_LP(dev_priv))
6895 bxt_get_ddi_pll(dev_priv, port, pipe_config);
6896 else if (IS_GEN9_BC(dev_priv))
6897 skl_get_ddi_pll(dev_priv, port, pipe_config);
6899 hsw_get_ddi_pll(dev_priv, port, pipe_config);
6902 * Haswell has only FDI/PCH transcoder A. It is which is connected to
6903 * DDI E. So just check whether this pipe is wired to DDI E and whether
6904 * the PCH transcoder is on.
6906 if (INTEL_GEN(dev_priv) < 9 &&
6907 (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
6908 pipe_config->has_pch_encoder = true;
6910 tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
6911 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6912 FDI_DP_PORT_WIDTH_SHIFT) + 1;
6914 ilk_get_fdi_m_n_config(crtc, pipe_config);
6918 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
6919 struct intel_crtc_state *pipe_config)
6921 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6922 struct intel_display_power_domain_set power_domain_set = { };
6926 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6927 POWER_DOMAIN_PIPE(crtc->pipe)))
6930 pipe_config->shared_dpll = NULL;
6932 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
6934 if (IS_GEN9_LP(dev_priv) &&
6935 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
6936 drm_WARN_ON(&dev_priv->drm, active);
6940 intel_dsc_get_config(pipe_config);
6943 /* bigjoiner slave doesn't enable transcoder */
6944 if (!pipe_config->bigjoiner_slave)
6948 pipe_config->pixel_multiplier = 1;
6950 /* we cannot read out most state, so don't bother.. */
6951 pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
6952 } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
6953 INTEL_GEN(dev_priv) >= 11) {
6954 hsw_get_ddi_port_state(crtc, pipe_config);
6955 intel_get_transcoder_timings(crtc, pipe_config);
6958 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
6959 intel_vrr_get_config(crtc, pipe_config);
6961 intel_get_pipe_src_size(crtc, pipe_config);
6963 if (IS_HASWELL(dev_priv)) {
6964 u32 tmp = intel_de_read(dev_priv,
6965 PIPECONF(pipe_config->cpu_transcoder));
6967 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
6968 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6970 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6972 pipe_config->output_format =
6973 bdw_get_pipemisc_output_format(crtc);
6976 pipe_config->gamma_mode = intel_de_read(dev_priv,
6977 GAMMA_MODE(crtc->pipe));
6979 pipe_config->csc_mode = intel_de_read(dev_priv,
6980 PIPE_CSC_MODE(crtc->pipe));
6982 if (INTEL_GEN(dev_priv) >= 9) {
6983 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6985 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6986 pipe_config->gamma_enable = true;
6988 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6989 pipe_config->csc_enable = true;
6991 i9xx_get_pipe_color_config(pipe_config);
6994 intel_color_get_config(pipe_config);
6996 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6997 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6998 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6999 pipe_config->ips_linetime =
7000 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
7002 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
7003 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
7004 if (INTEL_GEN(dev_priv) >= 9)
7005 skl_get_pfit_config(pipe_config);
7007 ilk_get_pfit_config(pipe_config);
7010 if (hsw_crtc_supports_ips(crtc)) {
7011 if (IS_HASWELL(dev_priv))
7012 pipe_config->ips_enabled = intel_de_read(dev_priv,
7013 IPS_CTL) & IPS_ENABLE;
7016 * We cannot readout IPS state on broadwell, set to
7017 * true so we can set it to a defined state on first
7020 pipe_config->ips_enabled = true;
7024 if (pipe_config->bigjoiner_slave) {
7025 /* Cannot be read out as a slave, set to 0. */
7026 pipe_config->pixel_multiplier = 0;
7027 } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
7028 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
7029 pipe_config->pixel_multiplier =
7030 intel_de_read(dev_priv,
7031 PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
7033 pipe_config->pixel_multiplier = 1;
7037 intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
7042 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
7044 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7045 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7047 if (!i915->display.get_pipe_config(crtc, crtc_state))
7050 crtc_state->hw.active = true;
7052 intel_crtc_readout_derived_state(crtc_state);
7057 /* VESA 640x480x72Hz mode to set on the pipe */
7058 static const struct drm_display_mode load_detect_mode = {
7059 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
7060 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
7063 struct drm_framebuffer *
7064 intel_framebuffer_create(struct drm_i915_gem_object *obj,
7065 struct drm_mode_fb_cmd2 *mode_cmd)
7067 struct intel_framebuffer *intel_fb;
7070 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7072 return ERR_PTR(-ENOMEM);
7074 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
7078 return &intel_fb->base;
7082 return ERR_PTR(ret);
7085 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
7086 struct drm_crtc *crtc)
7088 struct drm_plane *plane;
7089 struct drm_plane_state *plane_state;
7092 ret = drm_atomic_add_affected_planes(state, crtc);
7096 for_each_new_plane_in_state(state, plane, plane_state, i) {
7097 if (plane_state->crtc != crtc)
7100 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
7104 drm_atomic_set_fb_for_plane(plane_state, NULL);
7110 int intel_get_load_detect_pipe(struct drm_connector *connector,
7111 struct intel_load_detect_pipe *old,
7112 struct drm_modeset_acquire_ctx *ctx)
7114 struct intel_crtc *intel_crtc;
7115 struct intel_encoder *intel_encoder =
7116 intel_attached_encoder(to_intel_connector(connector));
7117 struct drm_crtc *possible_crtc;
7118 struct drm_encoder *encoder = &intel_encoder->base;
7119 struct drm_crtc *crtc = NULL;
7120 struct drm_device *dev = encoder->dev;
7121 struct drm_i915_private *dev_priv = to_i915(dev);
7122 struct drm_mode_config *config = &dev->mode_config;
7123 struct drm_atomic_state *state = NULL, *restore_state = NULL;
7124 struct drm_connector_state *connector_state;
7125 struct intel_crtc_state *crtc_state;
7128 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7129 connector->base.id, connector->name,
7130 encoder->base.id, encoder->name);
7132 old->restore_state = NULL;
7134 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
7137 * Algorithm gets a little messy:
7139 * - if the connector already has an assigned crtc, use it (but make
7140 * sure it's on first)
7142 * - try to find the first unused crtc that can drive this connector,
7143 * and use that if we find one
7146 /* See if we already have a CRTC for this connector */
7147 if (connector->state->crtc) {
7148 crtc = connector->state->crtc;
7150 ret = drm_modeset_lock(&crtc->mutex, ctx);
7154 /* Make sure the crtc and connector are running */
7158 /* Find an unused one (if possible) */
7159 for_each_crtc(dev, possible_crtc) {
7161 if (!(encoder->possible_crtcs & (1 << i)))
7164 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
7168 if (possible_crtc->state->enable) {
7169 drm_modeset_unlock(&possible_crtc->mutex);
7173 crtc = possible_crtc;
7178 * If we didn't find an unused CRTC, don't use any.
7181 drm_dbg_kms(&dev_priv->drm,
7182 "no pipe available for load-detect\n");
7188 intel_crtc = to_intel_crtc(crtc);
7190 state = drm_atomic_state_alloc(dev);
7191 restore_state = drm_atomic_state_alloc(dev);
7192 if (!state || !restore_state) {
7197 state->acquire_ctx = ctx;
7198 restore_state->acquire_ctx = ctx;
7200 connector_state = drm_atomic_get_connector_state(state, connector);
7201 if (IS_ERR(connector_state)) {
7202 ret = PTR_ERR(connector_state);
7206 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
7210 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
7211 if (IS_ERR(crtc_state)) {
7212 ret = PTR_ERR(crtc_state);
7216 crtc_state->uapi.active = true;
7218 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
7223 ret = intel_modeset_disable_planes(state, crtc);
7227 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
7229 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
7231 ret = drm_atomic_add_affected_planes(restore_state, crtc);
7233 drm_dbg_kms(&dev_priv->drm,
7234 "Failed to create a copy of old state to restore: %i\n",
7239 ret = drm_atomic_commit(state);
7241 drm_dbg_kms(&dev_priv->drm,
7242 "failed to set mode on load-detect pipe\n");
7246 old->restore_state = restore_state;
7247 drm_atomic_state_put(state);
7249 /* let the connector get through one full cycle before testing */
7250 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
7255 drm_atomic_state_put(state);
7258 if (restore_state) {
7259 drm_atomic_state_put(restore_state);
7260 restore_state = NULL;
7263 if (ret == -EDEADLK)
7269 void intel_release_load_detect_pipe(struct drm_connector *connector,
7270 struct intel_load_detect_pipe *old,
7271 struct drm_modeset_acquire_ctx *ctx)
7273 struct intel_encoder *intel_encoder =
7274 intel_attached_encoder(to_intel_connector(connector));
7275 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
7276 struct drm_encoder *encoder = &intel_encoder->base;
7277 struct drm_atomic_state *state = old->restore_state;
7280 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7281 connector->base.id, connector->name,
7282 encoder->base.id, encoder->name);
7287 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
7289 drm_dbg_kms(&i915->drm,
7290 "Couldn't release load detect pipe: %i\n", ret);
7291 drm_atomic_state_put(state);
7294 static int i9xx_pll_refclk(struct drm_device *dev,
7295 const struct intel_crtc_state *pipe_config)
7297 struct drm_i915_private *dev_priv = to_i915(dev);
7298 u32 dpll = pipe_config->dpll_hw_state.dpll;
7300 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7301 return dev_priv->vbt.lvds_ssc_freq;
7302 else if (HAS_PCH_SPLIT(dev_priv))
7304 else if (!IS_GEN(dev_priv, 2))
7310 /* Returns the clock of the currently programmed mode of the given pipe. */
7311 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7312 struct intel_crtc_state *pipe_config)
7314 struct drm_device *dev = crtc->base.dev;
7315 struct drm_i915_private *dev_priv = to_i915(dev);
7316 enum pipe pipe = crtc->pipe;
7317 u32 dpll = pipe_config->dpll_hw_state.dpll;
7321 int refclk = i9xx_pll_refclk(dev, pipe_config);
7323 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7324 fp = pipe_config->dpll_hw_state.fp0;
7326 fp = pipe_config->dpll_hw_state.fp1;
7328 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
7329 if (IS_PINEVIEW(dev_priv)) {
7330 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
7331 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
7333 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
7334 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
7337 if (!IS_GEN(dev_priv, 2)) {
7338 if (IS_PINEVIEW(dev_priv))
7339 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
7340 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
7342 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
7343 DPLL_FPA01_P1_POST_DIV_SHIFT);
7345 switch (dpll & DPLL_MODE_MASK) {
7346 case DPLLB_MODE_DAC_SERIAL:
7347 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
7350 case DPLLB_MODE_LVDS:
7351 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
7355 drm_dbg_kms(&dev_priv->drm,
7356 "Unknown DPLL mode %08x in programmed "
7357 "mode\n", (int)(dpll & DPLL_MODE_MASK));
7361 if (IS_PINEVIEW(dev_priv))
7362 port_clock = pnv_calc_dpll_params(refclk, &clock);
7364 port_clock = i9xx_calc_dpll_params(refclk, &clock);
7366 u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
7368 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
7371 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7372 DPLL_FPA01_P1_POST_DIV_SHIFT);
7374 if (lvds & LVDS_CLKB_POWER_UP)
7379 if (dpll & PLL_P1_DIVIDE_BY_TWO)
7382 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
7383 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
7385 if (dpll & PLL_P2_DIVIDE_BY_4)
7391 port_clock = i9xx_calc_dpll_params(refclk, &clock);
7395 * This value includes pixel_multiplier. We will use
7396 * port_clock to compute adjusted_mode.crtc_clock in the
7397 * encoder's get_config() function.
7399 pipe_config->port_clock = port_clock;
7402 int intel_dotclock_calculate(int link_freq,
7403 const struct intel_link_m_n *m_n)
7406 * The calculation for the data clock is:
7407 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7408 * But we want to avoid losing precison if possible, so:
7409 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7411 * and the link clock is simpler:
7412 * link_clock = (m * link_clock) / n
7418 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
7421 static void ilk_pch_clock_get(struct intel_crtc *crtc,
7422 struct intel_crtc_state *pipe_config)
7424 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7426 /* read out port_clock from the DPLL */
7427 i9xx_crtc_clock_get(crtc, pipe_config);
7430 * In case there is an active pipe without active ports,
7431 * we may need some idea for the dotclock anyway.
7432 * Calculate one based on the FDI configuration.
7434 pipe_config->hw.adjusted_mode.crtc_clock =
7435 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
7436 &pipe_config->fdi_m_n);
7439 /* Returns the currently programmed mode of the given encoder. */
7440 struct drm_display_mode *
7441 intel_encoder_current_mode(struct intel_encoder *encoder)
7443 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
7444 struct intel_crtc_state *crtc_state;
7445 struct drm_display_mode *mode;
7446 struct intel_crtc *crtc;
7449 if (!encoder->get_hw_state(encoder, &pipe))
7452 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7454 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7458 crtc_state = intel_crtc_state_alloc(crtc);
7464 if (!intel_crtc_get_pipe_config(crtc_state)) {
7470 intel_encoder_get_config(encoder, crtc_state);
7472 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
7480 * intel_wm_need_update - Check whether watermarks need updating
7481 * @cur: current plane state
7482 * @new: new plane state
7484 * Check current plane state versus the new one to determine whether
7485 * watermarks need to be recalculated.
7487 * Returns true or false.
7489 static bool intel_wm_need_update(const struct intel_plane_state *cur,
7490 struct intel_plane_state *new)
7492 /* Update watermarks on tiling or size changes. */
7493 if (new->uapi.visible != cur->uapi.visible)
7496 if (!cur->hw.fb || !new->hw.fb)
7499 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
7500 cur->hw.rotation != new->hw.rotation ||
7501 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
7502 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
7503 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
7504 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
7510 static bool needs_scaling(const struct intel_plane_state *state)
7512 int src_w = drm_rect_width(&state->uapi.src) >> 16;
7513 int src_h = drm_rect_height(&state->uapi.src) >> 16;
7514 int dst_w = drm_rect_width(&state->uapi.dst);
7515 int dst_h = drm_rect_height(&state->uapi.dst);
7517 return (src_w != dst_w || src_h != dst_h);
7520 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
7521 struct intel_crtc_state *crtc_state,
7522 const struct intel_plane_state *old_plane_state,
7523 struct intel_plane_state *plane_state)
7525 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7526 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7527 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7528 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7529 bool was_crtc_enabled = old_crtc_state->hw.active;
7530 bool is_crtc_enabled = crtc_state->hw.active;
7531 bool turn_off, turn_on, visible, was_visible;
7534 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
7535 ret = skl_update_scaler_plane(crtc_state, plane_state);
7540 was_visible = old_plane_state->uapi.visible;
7541 visible = plane_state->uapi.visible;
7543 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
7544 was_visible = false;
7547 * Visibility is calculated as if the crtc was on, but
7548 * after scaler setup everything depends on it being off
7549 * when the crtc isn't active.
7551 * FIXME this is wrong for watermarks. Watermarks should also
7552 * be computed as if the pipe would be active. Perhaps move
7553 * per-plane wm computation to the .check_plane() hook, and
7554 * only combine the results from all planes in the current place?
7556 if (!is_crtc_enabled) {
7557 intel_plane_set_invisible(crtc_state, plane_state);
7561 if (!was_visible && !visible)
7564 turn_off = was_visible && (!visible || mode_changed);
7565 turn_on = visible && (!was_visible || mode_changed);
7567 drm_dbg_atomic(&dev_priv->drm,
7568 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
7569 crtc->base.base.id, crtc->base.name,
7570 plane->base.base.id, plane->base.name,
7571 was_visible, visible,
7572 turn_off, turn_on, mode_changed);
7575 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
7576 crtc_state->update_wm_pre = true;
7578 /* must disable cxsr around plane enable/disable */
7579 if (plane->id != PLANE_CURSOR)
7580 crtc_state->disable_cxsr = true;
7581 } else if (turn_off) {
7582 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
7583 crtc_state->update_wm_post = true;
7585 /* must disable cxsr around plane enable/disable */
7586 if (plane->id != PLANE_CURSOR)
7587 crtc_state->disable_cxsr = true;
7588 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
7589 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
7590 /* FIXME bollocks */
7591 crtc_state->update_wm_pre = true;
7592 crtc_state->update_wm_post = true;
7596 if (visible || was_visible)
7597 crtc_state->fb_bits |= plane->frontbuffer_bit;
7600 * ILK/SNB DVSACNTR/Sprite Enable
7601 * IVB SPR_CTL/Sprite Enable
7602 * "When in Self Refresh Big FIFO mode, a write to enable the
7603 * plane will be internally buffered and delayed while Big FIFO
7606 * Which means that enabling the sprite can take an extra frame
7607 * when we start in big FIFO mode (LP1+). Thus we need to drop
7608 * down to LP0 and wait for vblank in order to make sure the
7609 * sprite gets enabled on the next vblank after the register write.
7610 * Doing otherwise would risk enabling the sprite one frame after
7611 * we've already signalled flip completion. We can resume LP1+
7612 * once the sprite has been enabled.
7615 * WaCxSRDisabledForSpriteScaling:ivb
7616 * IVB SPR_SCALE/Scaling Enable
7617 * "Low Power watermarks must be disabled for at least one
7618 * frame before enabling sprite scaling, and kept disabled
7619 * until sprite scaling is disabled."
7621 * ILK/SNB DVSASCALE/Scaling Enable
7622 * "When in Self Refresh Big FIFO mode, scaling enable will be
7623 * masked off while Big FIFO mode is exiting."
7625 * Despite the w/a only being listed for IVB we assume that
7626 * the ILK/SNB note has similar ramifications, hence we apply
7627 * the w/a on all three platforms.
7629 * With experimental results seems this is needed also for primary
7630 * plane, not only sprite plane.
7632 if (plane->id != PLANE_CURSOR &&
7633 (IS_GEN_RANGE(dev_priv, 5, 6) ||
7634 IS_IVYBRIDGE(dev_priv)) &&
7635 (turn_on || (!needs_scaling(old_plane_state) &&
7636 needs_scaling(plane_state))))
7637 crtc_state->disable_lp_wm = true;
7642 static bool encoders_cloneable(const struct intel_encoder *a,
7643 const struct intel_encoder *b)
7645 /* masks could be asymmetric, so check both ways */
7646 return a == b || (a->cloneable & (1 << b->type) &&
7647 b->cloneable & (1 << a->type));
7650 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
7651 struct intel_crtc *crtc,
7652 struct intel_encoder *encoder)
7654 struct intel_encoder *source_encoder;
7655 struct drm_connector *connector;
7656 struct drm_connector_state *connector_state;
7659 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7660 if (connector_state->crtc != &crtc->base)
7664 to_intel_encoder(connector_state->best_encoder);
7665 if (!encoders_cloneable(encoder, source_encoder))
7672 static int icl_add_linked_planes(struct intel_atomic_state *state)
7674 struct intel_plane *plane, *linked;
7675 struct intel_plane_state *plane_state, *linked_plane_state;
7678 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7679 linked = plane_state->planar_linked_plane;
7684 linked_plane_state = intel_atomic_get_plane_state(state, linked);
7685 if (IS_ERR(linked_plane_state))
7686 return PTR_ERR(linked_plane_state);
7688 drm_WARN_ON(state->base.dev,
7689 linked_plane_state->planar_linked_plane != plane);
7690 drm_WARN_ON(state->base.dev,
7691 linked_plane_state->planar_slave == plane_state->planar_slave);
7697 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
7699 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7700 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7701 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
7702 struct intel_plane *plane, *linked;
7703 struct intel_plane_state *plane_state;
7706 if (INTEL_GEN(dev_priv) < 11)
7710 * Destroy all old plane links and make the slave plane invisible
7711 * in the crtc_state->active_planes mask.
7713 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7714 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
7717 plane_state->planar_linked_plane = NULL;
7718 if (plane_state->planar_slave && !plane_state->uapi.visible) {
7719 crtc_state->enabled_planes &= ~BIT(plane->id);
7720 crtc_state->active_planes &= ~BIT(plane->id);
7721 crtc_state->update_planes |= BIT(plane->id);
7724 plane_state->planar_slave = false;
7727 if (!crtc_state->nv12_planes)
7730 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7731 struct intel_plane_state *linked_state = NULL;
7733 if (plane->pipe != crtc->pipe ||
7734 !(crtc_state->nv12_planes & BIT(plane->id)))
7737 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
7738 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
7741 if (crtc_state->active_planes & BIT(linked->id))
7744 linked_state = intel_atomic_get_plane_state(state, linked);
7745 if (IS_ERR(linked_state))
7746 return PTR_ERR(linked_state);
7751 if (!linked_state) {
7752 drm_dbg_kms(&dev_priv->drm,
7753 "Need %d free Y planes for planar YUV\n",
7754 hweight8(crtc_state->nv12_planes));
7759 plane_state->planar_linked_plane = linked;
7761 linked_state->planar_slave = true;
7762 linked_state->planar_linked_plane = plane;
7763 crtc_state->enabled_planes |= BIT(linked->id);
7764 crtc_state->active_planes |= BIT(linked->id);
7765 crtc_state->update_planes |= BIT(linked->id);
7766 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
7767 linked->base.name, plane->base.name);
7769 /* Copy parameters to slave plane */
7770 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
7771 linked_state->color_ctl = plane_state->color_ctl;
7772 linked_state->view = plane_state->view;
7773 memcpy(linked_state->color_plane, plane_state->color_plane,
7774 sizeof(linked_state->color_plane));
7776 intel_plane_copy_hw_state(linked_state, plane_state);
7777 linked_state->uapi.src = plane_state->uapi.src;
7778 linked_state->uapi.dst = plane_state->uapi.dst;
7780 if (icl_is_hdr_plane(dev_priv, plane->id)) {
7781 if (linked->id == PLANE_SPRITE5)
7782 plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
7783 else if (linked->id == PLANE_SPRITE4)
7784 plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
7785 else if (linked->id == PLANE_SPRITE3)
7786 plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
7787 else if (linked->id == PLANE_SPRITE2)
7788 plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
7790 MISSING_CASE(linked->id);
7797 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7799 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7800 struct intel_atomic_state *state =
7801 to_intel_atomic_state(new_crtc_state->uapi.state);
7802 const struct intel_crtc_state *old_crtc_state =
7803 intel_atomic_get_old_crtc_state(state, crtc);
7805 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7808 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7810 const struct drm_display_mode *pipe_mode =
7811 &crtc_state->hw.pipe_mode;
7814 if (!crtc_state->hw.enable)
7817 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7818 pipe_mode->crtc_clock);
7820 return min(linetime_wm, 0x1ff);
7823 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7824 const struct intel_cdclk_state *cdclk_state)
7826 const struct drm_display_mode *pipe_mode =
7827 &crtc_state->hw.pipe_mode;
7830 if (!crtc_state->hw.enable)
7833 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7834 cdclk_state->logical.cdclk);
7836 return min(linetime_wm, 0x1ff);
7839 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7841 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7842 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7843 const struct drm_display_mode *pipe_mode =
7844 &crtc_state->hw.pipe_mode;
7847 if (!crtc_state->hw.enable)
7850 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7851 crtc_state->pixel_rate);
7853 /* Display WA #1135: BXT:ALL GLK:ALL */
7854 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
7857 return min(linetime_wm, 0x1ff);
7860 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7861 struct intel_crtc *crtc)
7863 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7864 struct intel_crtc_state *crtc_state =
7865 intel_atomic_get_new_crtc_state(state, crtc);
7866 const struct intel_cdclk_state *cdclk_state;
7868 if (INTEL_GEN(dev_priv) >= 9)
7869 crtc_state->linetime = skl_linetime_wm(crtc_state);
7871 crtc_state->linetime = hsw_linetime_wm(crtc_state);
7873 if (!hsw_crtc_supports_ips(crtc))
7876 cdclk_state = intel_atomic_get_cdclk_state(state);
7877 if (IS_ERR(cdclk_state))
7878 return PTR_ERR(cdclk_state);
7880 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
7886 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
7887 struct intel_crtc *crtc)
7889 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7890 struct intel_crtc_state *crtc_state =
7891 intel_atomic_get_new_crtc_state(state, crtc);
7892 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7895 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
7896 mode_changed && !crtc_state->hw.active)
7897 crtc_state->update_wm_post = true;
7899 if (mode_changed && crtc_state->hw.enable &&
7900 dev_priv->display.crtc_compute_clock &&
7901 !crtc_state->bigjoiner_slave &&
7902 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
7903 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
7909 * May need to update pipe gamma enable bits
7910 * when C8 planes are getting enabled/disabled.
7912 if (c8_planes_changed(crtc_state))
7913 crtc_state->uapi.color_mgmt_changed = true;
7915 if (mode_changed || crtc_state->update_pipe ||
7916 crtc_state->uapi.color_mgmt_changed) {
7917 ret = intel_color_check(crtc_state);
7922 if (dev_priv->display.compute_pipe_wm) {
7923 ret = dev_priv->display.compute_pipe_wm(crtc_state);
7925 drm_dbg_kms(&dev_priv->drm,
7926 "Target pipe watermarks are invalid\n");
7931 if (dev_priv->display.compute_intermediate_wm) {
7932 if (drm_WARN_ON(&dev_priv->drm,
7933 !dev_priv->display.compute_pipe_wm))
7937 * Calculate 'intermediate' watermarks that satisfy both the
7938 * old state and the new state. We can program these
7941 ret = dev_priv->display.compute_intermediate_wm(crtc_state);
7943 drm_dbg_kms(&dev_priv->drm,
7944 "No valid intermediate pipe watermarks are possible\n");
7949 if (INTEL_GEN(dev_priv) >= 9) {
7950 if (mode_changed || crtc_state->update_pipe) {
7951 ret = skl_update_scaler_crtc(crtc_state);
7956 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
7961 if (HAS_IPS(dev_priv)) {
7962 ret = hsw_compute_ips_config(crtc_state);
7967 if (INTEL_GEN(dev_priv) >= 9 ||
7968 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
7969 ret = hsw_compute_linetime_wm(state, crtc);
7975 if (!mode_changed) {
7976 ret = intel_psr2_sel_fetch_update(state, crtc);
7984 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7986 struct intel_connector *connector;
7987 struct drm_connector_list_iter conn_iter;
7989 drm_connector_list_iter_begin(dev, &conn_iter);
7990 for_each_intel_connector_iter(connector, &conn_iter) {
7991 if (connector->base.state->crtc)
7992 drm_connector_put(&connector->base);
7994 if (connector->base.encoder) {
7995 connector->base.state->best_encoder =
7996 connector->base.encoder;
7997 connector->base.state->crtc =
7998 connector->base.encoder->crtc;
8000 drm_connector_get(&connector->base);
8002 connector->base.state->best_encoder = NULL;
8003 connector->base.state->crtc = NULL;
8006 drm_connector_list_iter_end(&conn_iter);
8010 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
8011 struct intel_crtc_state *pipe_config)
8013 struct drm_connector *connector = conn_state->connector;
8014 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
8015 const struct drm_display_info *info = &connector->display_info;
8018 switch (conn_state->max_bpc) {
8032 MISSING_CASE(conn_state->max_bpc);
8036 if (bpp < pipe_config->pipe_bpp) {
8037 drm_dbg_kms(&i915->drm,
8038 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
8039 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
8040 connector->base.id, connector->name,
8042 3 * conn_state->max_requested_bpc,
8043 pipe_config->pipe_bpp);
8045 pipe_config->pipe_bpp = bpp;
8052 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8053 struct intel_crtc_state *pipe_config)
8055 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8056 struct drm_atomic_state *state = pipe_config->uapi.state;
8057 struct drm_connector *connector;
8058 struct drm_connector_state *connector_state;
8061 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8062 IS_CHERRYVIEW(dev_priv)))
8064 else if (INTEL_GEN(dev_priv) >= 5)
8069 pipe_config->pipe_bpp = bpp;
8071 /* Clamp display bpp to connector max bpp */
8072 for_each_new_connector_in_state(state, connector, connector_state, i) {
8075 if (connector_state->crtc != &crtc->base)
8078 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
8086 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
8087 const struct drm_display_mode *mode)
8089 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
8090 "type: 0x%x flags: 0x%x\n",
8092 mode->crtc_hdisplay, mode->crtc_hsync_start,
8093 mode->crtc_hsync_end, mode->crtc_htotal,
8094 mode->crtc_vdisplay, mode->crtc_vsync_start,
8095 mode->crtc_vsync_end, mode->crtc_vtotal,
8096 mode->type, mode->flags);
8100 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
8101 const char *id, unsigned int lane_count,
8102 const struct intel_link_m_n *m_n)
8104 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
8106 drm_dbg_kms(&i915->drm,
8107 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8109 m_n->gmch_m, m_n->gmch_n,
8110 m_n->link_m, m_n->link_n, m_n->tu);
8114 intel_dump_infoframe(struct drm_i915_private *dev_priv,
8115 const union hdmi_infoframe *frame)
8117 if (!drm_debug_enabled(DRM_UT_KMS))
8120 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
8124 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
8125 const struct drm_dp_vsc_sdp *vsc)
8127 if (!drm_debug_enabled(DRM_UT_KMS))
8130 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
8133 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
8135 static const char * const output_type_str[] = {
8136 OUTPUT_TYPE(UNUSED),
8137 OUTPUT_TYPE(ANALOG),
8147 OUTPUT_TYPE(DP_MST),
8152 static void snprintf_output_types(char *buf, size_t len,
8153 unsigned int output_types)
8160 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
8163 if ((output_types & BIT(i)) == 0)
8166 r = snprintf(str, len, "%s%s",
8167 str != buf ? "," : "", output_type_str[i]);
8173 output_types &= ~BIT(i);
8176 WARN_ON_ONCE(output_types != 0);
8179 static const char * const output_format_str[] = {
8180 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
8181 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
8182 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
8183 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
8186 static const char *output_formats(enum intel_output_format format)
8188 if (format >= ARRAY_SIZE(output_format_str))
8189 format = INTEL_OUTPUT_FORMAT_INVALID;
8190 return output_format_str[format];
8193 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
8195 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
8196 struct drm_i915_private *i915 = to_i915(plane->base.dev);
8197 const struct drm_framebuffer *fb = plane_state->hw.fb;
8198 struct drm_format_name_buf format_name;
8201 drm_dbg_kms(&i915->drm,
8202 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
8203 plane->base.base.id, plane->base.name,
8204 yesno(plane_state->uapi.visible));
8208 drm_dbg_kms(&i915->drm,
8209 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
8210 plane->base.base.id, plane->base.name,
8211 fb->base.id, fb->width, fb->height,
8212 drm_get_format_name(fb->format->format, &format_name),
8213 fb->modifier, yesno(plane_state->uapi.visible));
8214 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
8215 plane_state->hw.rotation, plane_state->scaler_id);
8216 if (plane_state->uapi.visible)
8217 drm_dbg_kms(&i915->drm,
8218 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
8219 DRM_RECT_FP_ARG(&plane_state->uapi.src),
8220 DRM_RECT_ARG(&plane_state->uapi.dst));
8223 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
8224 struct intel_atomic_state *state,
8225 const char *context)
8227 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8228 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8229 const struct intel_plane_state *plane_state;
8230 struct intel_plane *plane;
8234 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
8235 crtc->base.base.id, crtc->base.name,
8236 yesno(pipe_config->hw.enable), context);
8238 if (!pipe_config->hw.enable)
8241 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
8242 drm_dbg_kms(&dev_priv->drm,
8243 "active: %s, output_types: %s (0x%x), output format: %s\n",
8244 yesno(pipe_config->hw.active),
8245 buf, pipe_config->output_types,
8246 output_formats(pipe_config->output_format));
8248 drm_dbg_kms(&dev_priv->drm,
8249 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
8250 transcoder_name(pipe_config->cpu_transcoder),
8251 pipe_config->pipe_bpp, pipe_config->dither);
8253 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
8254 transcoder_name(pipe_config->mst_master_transcoder));
8256 drm_dbg_kms(&dev_priv->drm,
8257 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
8258 transcoder_name(pipe_config->master_transcoder),
8259 pipe_config->sync_mode_slaves_mask);
8261 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
8262 pipe_config->bigjoiner_slave ? "slave" :
8263 pipe_config->bigjoiner ? "master" : "no");
8265 if (pipe_config->has_pch_encoder)
8266 intel_dump_m_n_config(pipe_config, "fdi",
8267 pipe_config->fdi_lanes,
8268 &pipe_config->fdi_m_n);
8270 if (intel_crtc_has_dp_encoder(pipe_config)) {
8271 intel_dump_m_n_config(pipe_config, "dp m_n",
8272 pipe_config->lane_count, &pipe_config->dp_m_n);
8273 if (pipe_config->has_drrs)
8274 intel_dump_m_n_config(pipe_config, "dp m2_n2",
8275 pipe_config->lane_count,
8276 &pipe_config->dp_m2_n2);
8279 drm_dbg_kms(&dev_priv->drm,
8280 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
8281 pipe_config->has_audio, pipe_config->has_infoframe,
8282 pipe_config->infoframes.enable);
8284 if (pipe_config->infoframes.enable &
8285 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
8286 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
8287 pipe_config->infoframes.gcp);
8288 if (pipe_config->infoframes.enable &
8289 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
8290 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
8291 if (pipe_config->infoframes.enable &
8292 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
8293 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
8294 if (pipe_config->infoframes.enable &
8295 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
8296 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
8297 if (pipe_config->infoframes.enable &
8298 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
8299 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
8300 if (pipe_config->infoframes.enable &
8301 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
8302 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
8303 if (pipe_config->infoframes.enable &
8304 intel_hdmi_infoframe_enable(DP_SDP_VSC))
8305 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
8307 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
8308 yesno(pipe_config->vrr.enable),
8309 pipe_config->vrr.vmin, pipe_config->vrr.vmax,
8310 pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
8311 intel_vrr_vmin_vblank_start(pipe_config),
8312 intel_vrr_vmax_vblank_start(pipe_config));
8314 drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
8315 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
8316 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
8317 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
8318 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
8319 drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
8320 drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
8321 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
8322 drm_dbg_kms(&dev_priv->drm,
8323 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
8324 pipe_config->port_clock,
8325 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
8326 pipe_config->pixel_rate);
8328 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
8329 pipe_config->linetime, pipe_config->ips_linetime);
8331 if (INTEL_GEN(dev_priv) >= 9)
8332 drm_dbg_kms(&dev_priv->drm,
8333 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
8335 pipe_config->scaler_state.scaler_users,
8336 pipe_config->scaler_state.scaler_id);
8338 if (HAS_GMCH(dev_priv))
8339 drm_dbg_kms(&dev_priv->drm,
8340 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8341 pipe_config->gmch_pfit.control,
8342 pipe_config->gmch_pfit.pgm_ratios,
8343 pipe_config->gmch_pfit.lvds_border_bits);
8345 drm_dbg_kms(&dev_priv->drm,
8346 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
8347 DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
8348 enableddisabled(pipe_config->pch_pfit.enabled),
8349 yesno(pipe_config->pch_pfit.force_thru));
8351 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
8352 pipe_config->ips_enabled, pipe_config->double_wide);
8354 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
8356 if (IS_CHERRYVIEW(dev_priv))
8357 drm_dbg_kms(&dev_priv->drm,
8358 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
8359 pipe_config->cgm_mode, pipe_config->gamma_mode,
8360 pipe_config->gamma_enable, pipe_config->csc_enable);
8362 drm_dbg_kms(&dev_priv->drm,
8363 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
8364 pipe_config->csc_mode, pipe_config->gamma_mode,
8365 pipe_config->gamma_enable, pipe_config->csc_enable);
8367 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
8368 pipe_config->hw.degamma_lut ?
8369 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
8370 pipe_config->hw.gamma_lut ?
8371 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
8377 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8378 if (plane->pipe == crtc->pipe)
8379 intel_dump_plane_state(plane_state);
8383 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
8385 struct drm_device *dev = state->base.dev;
8386 struct drm_connector *connector;
8387 struct drm_connector_list_iter conn_iter;
8388 unsigned int used_ports = 0;
8389 unsigned int used_mst_ports = 0;
8393 * We're going to peek into connector->state,
8394 * hence connection_mutex must be held.
8396 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
8399 * Walk the connector list instead of the encoder
8400 * list to detect the problem on ddi platforms
8401 * where there's just one encoder per digital port.
8403 drm_connector_list_iter_begin(dev, &conn_iter);
8404 drm_for_each_connector_iter(connector, &conn_iter) {
8405 struct drm_connector_state *connector_state;
8406 struct intel_encoder *encoder;
8409 drm_atomic_get_new_connector_state(&state->base,
8411 if (!connector_state)
8412 connector_state = connector->state;
8414 if (!connector_state->best_encoder)
8417 encoder = to_intel_encoder(connector_state->best_encoder);
8419 drm_WARN_ON(dev, !connector_state->crtc);
8421 switch (encoder->type) {
8422 case INTEL_OUTPUT_DDI:
8423 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
8426 case INTEL_OUTPUT_DP:
8427 case INTEL_OUTPUT_HDMI:
8428 case INTEL_OUTPUT_EDP:
8429 /* the same port mustn't appear more than once */
8430 if (used_ports & BIT(encoder->port))
8433 used_ports |= BIT(encoder->port);
8435 case INTEL_OUTPUT_DP_MST:
8443 drm_connector_list_iter_end(&conn_iter);
8445 /* can't mix MST and SST/HDMI on the same port */
8446 if (used_ports & used_mst_ports)
8453 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
8454 struct intel_crtc_state *crtc_state)
8456 const struct intel_crtc_state *from_crtc_state = crtc_state;
8458 if (crtc_state->bigjoiner_slave) {
8459 from_crtc_state = intel_atomic_get_new_crtc_state(state,
8460 crtc_state->bigjoiner_linked_crtc);
8462 /* No need to copy state if the master state is unchanged */
8463 if (!from_crtc_state)
8467 intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
8471 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
8472 struct intel_crtc_state *crtc_state)
8474 crtc_state->hw.enable = crtc_state->uapi.enable;
8475 crtc_state->hw.active = crtc_state->uapi.active;
8476 crtc_state->hw.mode = crtc_state->uapi.mode;
8477 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
8478 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
8480 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
8483 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
8485 if (crtc_state->bigjoiner_slave)
8488 crtc_state->uapi.enable = crtc_state->hw.enable;
8489 crtc_state->uapi.active = crtc_state->hw.active;
8490 drm_WARN_ON(crtc_state->uapi.crtc->dev,
8491 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
8493 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
8494 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
8496 /* copy color blobs to uapi */
8497 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
8498 crtc_state->hw.degamma_lut);
8499 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
8500 crtc_state->hw.gamma_lut);
8501 drm_property_replace_blob(&crtc_state->uapi.ctm,
8502 crtc_state->hw.ctm);
8506 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
8507 const struct intel_crtc_state *from_crtc_state)
8509 struct intel_crtc_state *saved_state;
8510 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8512 saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
8516 saved_state->uapi = crtc_state->uapi;
8517 saved_state->scaler_state = crtc_state->scaler_state;
8518 saved_state->shared_dpll = crtc_state->shared_dpll;
8519 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
8520 saved_state->crc_enabled = crtc_state->crc_enabled;
8522 intel_crtc_free_hw_state(crtc_state);
8523 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
8526 /* Re-init hw state */
8527 memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
8528 crtc_state->hw.enable = from_crtc_state->hw.enable;
8529 crtc_state->hw.active = from_crtc_state->hw.active;
8530 crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
8531 crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
8534 crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
8535 crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
8536 crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
8537 crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
8538 crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
8539 crtc_state->bigjoiner_slave = true;
8540 crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
8541 crtc_state->has_audio = false;
8547 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
8548 struct intel_crtc_state *crtc_state)
8550 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8551 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8552 struct intel_crtc_state *saved_state;
8554 saved_state = intel_crtc_state_alloc(crtc);
8558 /* free the old crtc_state->hw members */
8559 intel_crtc_free_hw_state(crtc_state);
8561 /* FIXME: before the switch to atomic started, a new pipe_config was
8562 * kzalloc'd. Code that depends on any field being zero should be
8563 * fixed, so that the crtc_state can be safely duplicated. For now,
8564 * only fields that are know to not cause problems are preserved. */
8566 saved_state->uapi = crtc_state->uapi;
8567 saved_state->scaler_state = crtc_state->scaler_state;
8568 saved_state->shared_dpll = crtc_state->shared_dpll;
8569 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
8570 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
8571 sizeof(saved_state->icl_port_dplls));
8572 saved_state->crc_enabled = crtc_state->crc_enabled;
8573 if (IS_G4X(dev_priv) ||
8574 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8575 saved_state->wm = crtc_state->wm;
8577 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
8580 intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
8586 intel_modeset_pipe_config(struct intel_atomic_state *state,
8587 struct intel_crtc_state *pipe_config)
8589 struct drm_crtc *crtc = pipe_config->uapi.crtc;
8590 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
8591 struct drm_connector *connector;
8592 struct drm_connector_state *connector_state;
8593 int base_bpp, ret, i;
8596 pipe_config->cpu_transcoder =
8597 (enum transcoder) to_intel_crtc(crtc)->pipe;
8600 * Sanitize sync polarity flags based on requested ones. If neither
8601 * positive or negative polarity is requested, treat this as meaning
8602 * negative polarity.
8604 if (!(pipe_config->hw.adjusted_mode.flags &
8605 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8606 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8608 if (!(pipe_config->hw.adjusted_mode.flags &
8609 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8610 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8612 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8617 base_bpp = pipe_config->pipe_bpp;
8620 * Determine the real pipe dimensions. Note that stereo modes can
8621 * increase the actual pipe size due to the frame doubling and
8622 * insertion of additional space for blanks between the frame. This
8623 * is stored in the crtc timings. We use the requested mode to do this
8624 * computation to clearly distinguish it from the adjusted mode, which
8625 * can be changed by the connectors in the below retry loop.
8627 drm_mode_get_hv_timing(&pipe_config->hw.mode,
8628 &pipe_config->pipe_src_w,
8629 &pipe_config->pipe_src_h);
8631 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8632 struct intel_encoder *encoder =
8633 to_intel_encoder(connector_state->best_encoder);
8635 if (connector_state->crtc != crtc)
8638 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
8639 drm_dbg_kms(&i915->drm,
8640 "rejecting invalid cloning configuration\n");
8645 * Determine output_types before calling the .compute_config()
8646 * hooks so that the hooks can use this information safely.
8648 if (encoder->compute_output_type)
8649 pipe_config->output_types |=
8650 BIT(encoder->compute_output_type(encoder, pipe_config,
8653 pipe_config->output_types |= BIT(encoder->type);
8657 /* Ensure the port clock defaults are reset when retrying. */
8658 pipe_config->port_clock = 0;
8659 pipe_config->pixel_multiplier = 1;
8661 /* Fill in default crtc timings, allow encoders to overwrite them. */
8662 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
8663 CRTC_STEREO_DOUBLE);
8665 /* Pass our mode to the connectors and the CRTC to give them a chance to
8666 * adjust it according to limitations or connector properties, and also
8667 * a chance to reject the mode entirely.
8669 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8670 struct intel_encoder *encoder =
8671 to_intel_encoder(connector_state->best_encoder);
8673 if (connector_state->crtc != crtc)
8676 ret = encoder->compute_config(encoder, pipe_config,
8679 if (ret != -EDEADLK)
8680 drm_dbg_kms(&i915->drm,
8681 "Encoder config failure: %d\n",
8687 /* Set default port clock if not overwritten by the encoder. Needs to be
8688 * done afterwards in case the encoder adjusts the mode. */
8689 if (!pipe_config->port_clock)
8690 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
8691 * pipe_config->pixel_multiplier;
8693 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8694 if (ret == -EDEADLK)
8697 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
8701 if (ret == I915_DISPLAY_CONFIG_RETRY) {
8702 if (drm_WARN(&i915->drm, !retry,
8703 "loop in pipe configuration computation\n"))
8706 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
8711 /* Dithering seems to not pass-through bits correctly when it should, so
8712 * only enable it on 6bpc panels and when its not a compliance
8713 * test requesting 6bpc video pattern.
8715 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
8716 !pipe_config->dither_force_disable;
8717 drm_dbg_kms(&i915->drm,
8718 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
8719 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
8725 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
8727 struct intel_atomic_state *state =
8728 to_intel_atomic_state(crtc_state->uapi.state);
8729 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8730 struct drm_connector_state *conn_state;
8731 struct drm_connector *connector;
8734 for_each_new_connector_in_state(&state->base, connector,
8736 struct intel_encoder *encoder =
8737 to_intel_encoder(conn_state->best_encoder);
8740 if (conn_state->crtc != &crtc->base ||
8741 !encoder->compute_config_late)
8744 ret = encoder->compute_config_late(encoder, crtc_state,
8753 bool intel_fuzzy_clock_check(int clock1, int clock2)
8757 if (clock1 == clock2)
8760 if (!clock1 || !clock2)
8763 diff = abs(clock1 - clock2);
8765 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
8772 intel_compare_m_n(unsigned int m, unsigned int n,
8773 unsigned int m2, unsigned int n2,
8776 if (m == m2 && n == n2)
8779 if (exact || !m || !n || !m2 || !n2)
8782 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8789 } else if (n < n2) {
8799 return intel_fuzzy_clock_check(m, m2);
8803 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8804 const struct intel_link_m_n *m2_n2,
8807 return m_n->tu == m2_n2->tu &&
8808 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8809 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8810 intel_compare_m_n(m_n->link_m, m_n->link_n,
8811 m2_n2->link_m, m2_n2->link_n, exact);
8815 intel_compare_infoframe(const union hdmi_infoframe *a,
8816 const union hdmi_infoframe *b)
8818 return memcmp(a, b, sizeof(*a)) == 0;
8822 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8823 const struct drm_dp_vsc_sdp *b)
8825 return memcmp(a, b, sizeof(*a)) == 0;
8829 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8830 bool fastset, const char *name,
8831 const union hdmi_infoframe *a,
8832 const union hdmi_infoframe *b)
8835 if (!drm_debug_enabled(DRM_UT_KMS))
8838 drm_dbg_kms(&dev_priv->drm,
8839 "fastset mismatch in %s infoframe\n", name);
8840 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8841 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8842 drm_dbg_kms(&dev_priv->drm, "found:\n");
8843 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8845 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8846 drm_err(&dev_priv->drm, "expected:\n");
8847 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8848 drm_err(&dev_priv->drm, "found:\n");
8849 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8854 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8855 bool fastset, const char *name,
8856 const struct drm_dp_vsc_sdp *a,
8857 const struct drm_dp_vsc_sdp *b)
8860 if (!drm_debug_enabled(DRM_UT_KMS))
8863 drm_dbg_kms(&dev_priv->drm,
8864 "fastset mismatch in %s dp sdp\n", name);
8865 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8866 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8867 drm_dbg_kms(&dev_priv->drm, "found:\n");
8868 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8870 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8871 drm_err(&dev_priv->drm, "expected:\n");
8872 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8873 drm_err(&dev_priv->drm, "found:\n");
8874 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8878 static void __printf(4, 5)
8879 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8880 const char *name, const char *format, ...)
8882 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8883 struct va_format vaf;
8886 va_start(args, format);
8891 drm_dbg_kms(&i915->drm,
8892 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8893 crtc->base.base.id, crtc->base.name, name, &vaf);
8895 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8896 crtc->base.base.id, crtc->base.name, name, &vaf);
8901 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8903 if (dev_priv->params.fastboot != -1)
8904 return dev_priv->params.fastboot;
8906 /* Enable fastboot by default on Skylake and newer */
8907 if (INTEL_GEN(dev_priv) >= 9)
8910 /* Enable fastboot by default on VLV and CHV */
8911 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8914 /* Disabled by default on all others */
8919 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8920 const struct intel_crtc_state *pipe_config,
8923 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8924 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8927 bool fixup_inherited = fastset &&
8928 current_config->inherited && !pipe_config->inherited;
8930 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8931 drm_dbg_kms(&dev_priv->drm,
8932 "initial modeset and fastboot not set\n");
8936 #define PIPE_CONF_CHECK_X(name) do { \
8937 if (current_config->name != pipe_config->name) { \
8938 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8939 "(expected 0x%08x, found 0x%08x)", \
8940 current_config->name, \
8941 pipe_config->name); \
8946 #define PIPE_CONF_CHECK_I(name) do { \
8947 if (current_config->name != pipe_config->name) { \
8948 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8949 "(expected %i, found %i)", \
8950 current_config->name, \
8951 pipe_config->name); \
8956 #define PIPE_CONF_CHECK_BOOL(name) do { \
8957 if (current_config->name != pipe_config->name) { \
8958 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8959 "(expected %s, found %s)", \
8960 yesno(current_config->name), \
8961 yesno(pipe_config->name)); \
8967 * Checks state where we only read out the enabling, but not the entire
8968 * state itself (like full infoframes or ELD for audio). These states
8969 * require a full modeset on bootup to fix up.
8971 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8972 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8973 PIPE_CONF_CHECK_BOOL(name); \
8975 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8976 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8977 yesno(current_config->name), \
8978 yesno(pipe_config->name)); \
8983 #define PIPE_CONF_CHECK_P(name) do { \
8984 if (current_config->name != pipe_config->name) { \
8985 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8986 "(expected %p, found %p)", \
8987 current_config->name, \
8988 pipe_config->name); \
8993 #define PIPE_CONF_CHECK_M_N(name) do { \
8994 if (!intel_compare_link_m_n(¤t_config->name, \
8995 &pipe_config->name,\
8997 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8998 "(expected tu %i gmch %i/%i link %i/%i, " \
8999 "found tu %i, gmch %i/%i link %i/%i)", \
9000 current_config->name.tu, \
9001 current_config->name.gmch_m, \
9002 current_config->name.gmch_n, \
9003 current_config->name.link_m, \
9004 current_config->name.link_n, \
9005 pipe_config->name.tu, \
9006 pipe_config->name.gmch_m, \
9007 pipe_config->name.gmch_n, \
9008 pipe_config->name.link_m, \
9009 pipe_config->name.link_n); \
9014 /* This is required for BDW+ where there is only one set of registers for
9015 * switching between high and low RR.
9016 * This macro can be used whenever a comparison has to be made between one
9017 * hw state and multiple sw state variables.
9019 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
9020 if (!intel_compare_link_m_n(¤t_config->name, \
9021 &pipe_config->name, !fastset) && \
9022 !intel_compare_link_m_n(¤t_config->alt_name, \
9023 &pipe_config->name, !fastset)) { \
9024 pipe_config_mismatch(fastset, crtc, __stringify(name), \
9025 "(expected tu %i gmch %i/%i link %i/%i, " \
9026 "or tu %i gmch %i/%i link %i/%i, " \
9027 "found tu %i, gmch %i/%i link %i/%i)", \
9028 current_config->name.tu, \
9029 current_config->name.gmch_m, \
9030 current_config->name.gmch_n, \
9031 current_config->name.link_m, \
9032 current_config->name.link_n, \
9033 current_config->alt_name.tu, \
9034 current_config->alt_name.gmch_m, \
9035 current_config->alt_name.gmch_n, \
9036 current_config->alt_name.link_m, \
9037 current_config->alt_name.link_n, \
9038 pipe_config->name.tu, \
9039 pipe_config->name.gmch_m, \
9040 pipe_config->name.gmch_n, \
9041 pipe_config->name.link_m, \
9042 pipe_config->name.link_n); \
9047 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
9048 if ((current_config->name ^ pipe_config->name) & (mask)) { \
9049 pipe_config_mismatch(fastset, crtc, __stringify(name), \
9050 "(%x) (expected %i, found %i)", \
9052 current_config->name & (mask), \
9053 pipe_config->name & (mask)); \
9058 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
9059 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
9060 pipe_config_mismatch(fastset, crtc, __stringify(name), \
9061 "(expected %i, found %i)", \
9062 current_config->name, \
9063 pipe_config->name); \
9068 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
9069 if (!intel_compare_infoframe(¤t_config->infoframes.name, \
9070 &pipe_config->infoframes.name)) { \
9071 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
9072 ¤t_config->infoframes.name, \
9073 &pipe_config->infoframes.name); \
9078 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
9079 if (!current_config->has_psr && !pipe_config->has_psr && \
9080 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \
9081 &pipe_config->infoframes.name)) { \
9082 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
9083 ¤t_config->infoframes.name, \
9084 &pipe_config->infoframes.name); \
9089 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
9090 if (current_config->name1 != pipe_config->name1) { \
9091 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
9092 "(expected %i, found %i, won't compare lut values)", \
9093 current_config->name1, \
9094 pipe_config->name1); \
9097 if (!intel_color_lut_equal(current_config->name2, \
9098 pipe_config->name2, pipe_config->name1, \
9100 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
9101 "hw_state doesn't match sw_state"); \
9107 #define PIPE_CONF_QUIRK(quirk) \
9108 ((current_config->quirks | pipe_config->quirks) & (quirk))
9110 PIPE_CONF_CHECK_I(cpu_transcoder);
9112 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
9113 PIPE_CONF_CHECK_I(fdi_lanes);
9114 PIPE_CONF_CHECK_M_N(fdi_m_n);
9116 PIPE_CONF_CHECK_I(lane_count);
9117 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
9119 if (INTEL_GEN(dev_priv) < 8) {
9120 PIPE_CONF_CHECK_M_N(dp_m_n);
9122 if (current_config->has_drrs)
9123 PIPE_CONF_CHECK_M_N(dp_m2_n2);
9125 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
9127 PIPE_CONF_CHECK_X(output_types);
9129 /* FIXME do the readout properly and get rid of this quirk */
9130 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
9131 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
9132 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
9133 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
9134 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
9135 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
9136 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
9138 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
9139 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
9140 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
9141 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
9142 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
9143 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
9145 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
9146 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
9147 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
9148 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
9149 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
9150 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
9152 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
9153 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
9154 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
9155 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
9156 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
9157 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
9159 PIPE_CONF_CHECK_I(pixel_multiplier);
9161 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9162 DRM_MODE_FLAG_INTERLACE);
9164 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
9165 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9166 DRM_MODE_FLAG_PHSYNC);
9167 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9168 DRM_MODE_FLAG_NHSYNC);
9169 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9170 DRM_MODE_FLAG_PVSYNC);
9171 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9172 DRM_MODE_FLAG_NVSYNC);
9176 PIPE_CONF_CHECK_I(output_format);
9177 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
9178 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
9179 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
9180 PIPE_CONF_CHECK_BOOL(limited_color_range);
9182 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
9183 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
9184 PIPE_CONF_CHECK_BOOL(has_infoframe);
9185 /* FIXME do the readout properly and get rid of this quirk */
9186 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9187 PIPE_CONF_CHECK_BOOL(fec_enable);
9189 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
9191 PIPE_CONF_CHECK_X(gmch_pfit.control);
9192 /* pfit ratios are autocomputed by the hw on gen4+ */
9193 if (INTEL_GEN(dev_priv) < 4)
9194 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
9195 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9198 * Changing the EDP transcoder input mux
9199 * (A_ONOFF vs. A_ON) requires a full modeset.
9201 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
9204 PIPE_CONF_CHECK_I(pipe_src_w);
9205 PIPE_CONF_CHECK_I(pipe_src_h);
9207 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
9208 if (current_config->pch_pfit.enabled) {
9209 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
9210 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
9211 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
9212 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
9215 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
9216 /* FIXME do the readout properly and get rid of this quirk */
9217 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9218 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
9220 PIPE_CONF_CHECK_X(gamma_mode);
9221 if (IS_CHERRYVIEW(dev_priv))
9222 PIPE_CONF_CHECK_X(cgm_mode);
9224 PIPE_CONF_CHECK_X(csc_mode);
9225 PIPE_CONF_CHECK_BOOL(gamma_enable);
9226 PIPE_CONF_CHECK_BOOL(csc_enable);
9228 PIPE_CONF_CHECK_I(linetime);
9229 PIPE_CONF_CHECK_I(ips_linetime);
9231 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
9233 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
9236 PIPE_CONF_CHECK_BOOL(double_wide);
9238 PIPE_CONF_CHECK_P(shared_dpll);
9240 /* FIXME do the readout properly and get rid of this quirk */
9241 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
9242 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
9243 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
9244 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
9245 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
9246 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
9247 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
9248 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
9249 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
9250 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
9251 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
9252 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
9253 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
9254 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
9255 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
9256 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
9257 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
9258 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
9259 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
9260 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
9261 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
9262 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
9263 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
9264 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
9265 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
9266 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
9267 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
9268 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
9269 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
9270 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
9271 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
9272 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
9274 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
9275 PIPE_CONF_CHECK_X(dsi_pll.div);
9277 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
9278 PIPE_CONF_CHECK_I(pipe_bpp);
9280 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
9281 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
9282 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9284 PIPE_CONF_CHECK_I(min_voltage_level);
9287 PIPE_CONF_CHECK_X(infoframes.enable);
9288 PIPE_CONF_CHECK_X(infoframes.gcp);
9289 PIPE_CONF_CHECK_INFOFRAME(avi);
9290 PIPE_CONF_CHECK_INFOFRAME(spd);
9291 PIPE_CONF_CHECK_INFOFRAME(hdmi);
9292 PIPE_CONF_CHECK_INFOFRAME(drm);
9293 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
9295 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
9296 PIPE_CONF_CHECK_I(master_transcoder);
9297 PIPE_CONF_CHECK_BOOL(bigjoiner);
9298 PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
9299 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
9301 PIPE_CONF_CHECK_I(dsc.compression_enable);
9302 PIPE_CONF_CHECK_I(dsc.dsc_split);
9303 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
9305 PIPE_CONF_CHECK_I(mst_master_transcoder);
9307 PIPE_CONF_CHECK_BOOL(vrr.enable);
9308 PIPE_CONF_CHECK_I(vrr.vmin);
9309 PIPE_CONF_CHECK_I(vrr.vmax);
9310 PIPE_CONF_CHECK_I(vrr.flipline);
9311 PIPE_CONF_CHECK_I(vrr.pipeline_full);
9313 #undef PIPE_CONF_CHECK_X
9314 #undef PIPE_CONF_CHECK_I
9315 #undef PIPE_CONF_CHECK_BOOL
9316 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
9317 #undef PIPE_CONF_CHECK_P
9318 #undef PIPE_CONF_CHECK_FLAGS
9319 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
9320 #undef PIPE_CONF_CHECK_COLOR_LUT
9321 #undef PIPE_CONF_QUIRK
9326 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
9327 const struct intel_crtc_state *pipe_config)
9329 if (pipe_config->has_pch_encoder) {
9330 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
9331 &pipe_config->fdi_m_n);
9332 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
9335 * FDI already provided one idea for the dotclock.
9336 * Yell if the encoder disagrees.
9338 drm_WARN(&dev_priv->drm,
9339 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
9340 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9341 fdi_dotclock, dotclock);
9345 static void verify_wm_state(struct intel_crtc *crtc,
9346 struct intel_crtc_state *new_crtc_state)
9348 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9349 struct skl_hw_state {
9350 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
9351 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
9352 struct skl_pipe_wm wm;
9354 struct skl_pipe_wm *sw_wm;
9355 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
9356 u8 hw_enabled_slices;
9357 const enum pipe pipe = crtc->pipe;
9358 int plane, level, max_level = ilk_wm_max_level(dev_priv);
9360 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
9363 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
9367 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
9368 sw_wm = &new_crtc_state->wm.skl.optimal;
9370 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
9372 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
9374 if (INTEL_GEN(dev_priv) >= 11 &&
9375 hw_enabled_slices != dev_priv->dbuf.enabled_slices)
9376 drm_err(&dev_priv->drm,
9377 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
9378 dev_priv->dbuf.enabled_slices,
9382 for_each_universal_plane(dev_priv, pipe, plane) {
9383 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
9385 hw_plane_wm = &hw->wm.planes[plane];
9386 sw_plane_wm = &sw_wm->planes[plane];
9389 for (level = 0; level <= max_level; level++) {
9390 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
9391 &sw_plane_wm->wm[level]) ||
9392 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
9393 &sw_plane_wm->sagv_wm0)))
9396 drm_err(&dev_priv->drm,
9397 "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
9398 pipe_name(pipe), plane + 1, level,
9399 sw_plane_wm->wm[level].plane_en,
9400 sw_plane_wm->wm[level].plane_res_b,
9401 sw_plane_wm->wm[level].plane_res_l,
9402 hw_plane_wm->wm[level].plane_en,
9403 hw_plane_wm->wm[level].plane_res_b,
9404 hw_plane_wm->wm[level].plane_res_l);
9407 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
9408 &sw_plane_wm->trans_wm)) {
9409 drm_err(&dev_priv->drm,
9410 "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
9411 pipe_name(pipe), plane + 1,
9412 sw_plane_wm->trans_wm.plane_en,
9413 sw_plane_wm->trans_wm.plane_res_b,
9414 sw_plane_wm->trans_wm.plane_res_l,
9415 hw_plane_wm->trans_wm.plane_en,
9416 hw_plane_wm->trans_wm.plane_res_b,
9417 hw_plane_wm->trans_wm.plane_res_l);
9421 hw_ddb_entry = &hw->ddb_y[plane];
9422 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
9424 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
9425 drm_err(&dev_priv->drm,
9426 "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
9427 pipe_name(pipe), plane + 1,
9428 sw_ddb_entry->start, sw_ddb_entry->end,
9429 hw_ddb_entry->start, hw_ddb_entry->end);
9435 * If the cursor plane isn't active, we may not have updated it's ddb
9436 * allocation. In that case since the ddb allocation will be updated
9437 * once the plane becomes visible, we can skip this check
9440 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
9442 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
9443 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
9446 for (level = 0; level <= max_level; level++) {
9447 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
9448 &sw_plane_wm->wm[level]) ||
9449 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
9450 &sw_plane_wm->sagv_wm0)))
9453 drm_err(&dev_priv->drm,
9454 "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
9455 pipe_name(pipe), level,
9456 sw_plane_wm->wm[level].plane_en,
9457 sw_plane_wm->wm[level].plane_res_b,
9458 sw_plane_wm->wm[level].plane_res_l,
9459 hw_plane_wm->wm[level].plane_en,
9460 hw_plane_wm->wm[level].plane_res_b,
9461 hw_plane_wm->wm[level].plane_res_l);
9464 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
9465 &sw_plane_wm->trans_wm)) {
9466 drm_err(&dev_priv->drm,
9467 "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
9469 sw_plane_wm->trans_wm.plane_en,
9470 sw_plane_wm->trans_wm.plane_res_b,
9471 sw_plane_wm->trans_wm.plane_res_l,
9472 hw_plane_wm->trans_wm.plane_en,
9473 hw_plane_wm->trans_wm.plane_res_b,
9474 hw_plane_wm->trans_wm.plane_res_l);
9478 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
9479 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
9481 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
9482 drm_err(&dev_priv->drm,
9483 "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
9485 sw_ddb_entry->start, sw_ddb_entry->end,
9486 hw_ddb_entry->start, hw_ddb_entry->end);
9494 verify_connector_state(struct intel_atomic_state *state,
9495 struct intel_crtc *crtc)
9497 struct drm_connector *connector;
9498 struct drm_connector_state *new_conn_state;
9501 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
9502 struct drm_encoder *encoder = connector->encoder;
9503 struct intel_crtc_state *crtc_state = NULL;
9505 if (new_conn_state->crtc != &crtc->base)
9509 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
9511 intel_connector_verify_state(crtc_state, new_conn_state);
9513 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
9514 "connector's atomic encoder doesn't match legacy encoder\n");
9519 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
9521 struct intel_encoder *encoder;
9522 struct drm_connector *connector;
9523 struct drm_connector_state *old_conn_state, *new_conn_state;
9526 for_each_intel_encoder(&dev_priv->drm, encoder) {
9527 bool enabled = false, found = false;
9530 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
9531 encoder->base.base.id,
9532 encoder->base.name);
9534 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
9535 new_conn_state, i) {
9536 if (old_conn_state->best_encoder == &encoder->base)
9539 if (new_conn_state->best_encoder != &encoder->base)
9541 found = enabled = true;
9543 I915_STATE_WARN(new_conn_state->crtc !=
9545 "connector's crtc doesn't match encoder crtc\n");
9551 I915_STATE_WARN(!!encoder->base.crtc != enabled,
9552 "encoder's enabled state mismatch "
9553 "(expected %i, found %i)\n",
9554 !!encoder->base.crtc, enabled);
9556 if (!encoder->base.crtc) {
9559 active = encoder->get_hw_state(encoder, &pipe);
9560 I915_STATE_WARN(active,
9561 "encoder detached but still enabled on pipe %c.\n",
9568 verify_crtc_state(struct intel_crtc *crtc,
9569 struct intel_crtc_state *old_crtc_state,
9570 struct intel_crtc_state *new_crtc_state)
9572 struct drm_device *dev = crtc->base.dev;
9573 struct drm_i915_private *dev_priv = to_i915(dev);
9574 struct intel_encoder *encoder;
9575 struct intel_crtc_state *pipe_config = old_crtc_state;
9576 struct drm_atomic_state *state = old_crtc_state->uapi.state;
9577 struct intel_crtc *master = crtc;
9579 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
9580 intel_crtc_free_hw_state(old_crtc_state);
9581 intel_crtc_state_reset(old_crtc_state, crtc);
9582 old_crtc_state->uapi.state = state;
9584 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
9587 pipe_config->hw.enable = new_crtc_state->hw.enable;
9589 intel_crtc_get_pipe_config(pipe_config);
9591 /* we keep both pipes enabled on 830 */
9592 if (IS_I830(dev_priv) && pipe_config->hw.active)
9593 pipe_config->hw.active = new_crtc_state->hw.active;
9595 I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
9596 "crtc active state doesn't match with hw state "
9597 "(expected %i, found %i)\n",
9598 new_crtc_state->hw.active, pipe_config->hw.active);
9600 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
9601 "transitional active state does not match atomic hw state "
9602 "(expected %i, found %i)\n",
9603 new_crtc_state->hw.active, crtc->active);
9605 if (new_crtc_state->bigjoiner_slave)
9606 master = new_crtc_state->bigjoiner_linked_crtc;
9608 for_each_encoder_on_crtc(dev, &master->base, encoder) {
9612 active = encoder->get_hw_state(encoder, &pipe);
9613 I915_STATE_WARN(active != new_crtc_state->hw.active,
9614 "[ENCODER:%i] active %i with crtc active %i\n",
9615 encoder->base.base.id, active,
9616 new_crtc_state->hw.active);
9618 I915_STATE_WARN(active && master->pipe != pipe,
9619 "Encoder connected to wrong pipe %c\n",
9623 intel_encoder_get_config(encoder, pipe_config);
9626 if (!new_crtc_state->hw.active)
9629 intel_pipe_config_sanity_check(dev_priv, pipe_config);
9631 if (!intel_pipe_config_compare(new_crtc_state,
9632 pipe_config, false)) {
9633 I915_STATE_WARN(1, "pipe state doesn't match!\n");
9634 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
9635 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
9640 intel_verify_planes(struct intel_atomic_state *state)
9642 struct intel_plane *plane;
9643 const struct intel_plane_state *plane_state;
9646 for_each_new_intel_plane_in_state(state, plane,
9648 assert_plane(plane, plane_state->planar_slave ||
9649 plane_state->uapi.visible);
9653 verify_single_dpll_state(struct drm_i915_private *dev_priv,
9654 struct intel_shared_dpll *pll,
9655 struct intel_crtc *crtc,
9656 struct intel_crtc_state *new_crtc_state)
9658 struct intel_dpll_hw_state dpll_hw_state;
9659 unsigned int crtc_mask;
9662 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
9664 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
9666 active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
9668 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
9669 I915_STATE_WARN(!pll->on && pll->active_mask,
9670 "pll in active use but not on in sw tracking\n");
9671 I915_STATE_WARN(pll->on && !pll->active_mask,
9672 "pll is on but not used by any active crtc\n");
9673 I915_STATE_WARN(pll->on != active,
9674 "pll on state mismatch (expected %i, found %i)\n",
9679 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
9680 "more active pll users than references: %x vs %x\n",
9681 pll->active_mask, pll->state.crtc_mask);
9686 crtc_mask = drm_crtc_mask(&crtc->base);
9688 if (new_crtc_state->hw.active)
9689 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
9690 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
9691 pipe_name(crtc->pipe), pll->active_mask);
9693 I915_STATE_WARN(pll->active_mask & crtc_mask,
9694 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
9695 pipe_name(crtc->pipe), pll->active_mask);
9697 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
9698 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
9699 crtc_mask, pll->state.crtc_mask);
9701 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
9703 sizeof(dpll_hw_state)),
9704 "pll hw state mismatch\n");
9708 verify_shared_dpll_state(struct intel_crtc *crtc,
9709 struct intel_crtc_state *old_crtc_state,
9710 struct intel_crtc_state *new_crtc_state)
9712 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9714 if (new_crtc_state->shared_dpll)
9715 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
9717 if (old_crtc_state->shared_dpll &&
9718 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
9719 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
9720 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
9722 I915_STATE_WARN(pll->active_mask & crtc_mask,
9723 "pll active mismatch (didn't expect pipe %c in active mask)\n",
9724 pipe_name(crtc->pipe));
9725 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
9726 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
9727 pipe_name(crtc->pipe));
9732 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9733 struct intel_atomic_state *state,
9734 struct intel_crtc_state *old_crtc_state,
9735 struct intel_crtc_state *new_crtc_state)
9737 if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9740 verify_wm_state(crtc, new_crtc_state);
9741 verify_connector_state(state, crtc);
9742 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9743 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9747 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9751 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9752 verify_single_dpll_state(dev_priv,
9753 &dev_priv->dpll.shared_dplls[i],
9758 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
9759 struct intel_atomic_state *state)
9761 verify_encoder_state(dev_priv, state);
9762 verify_connector_state(state, NULL);
9763 verify_disabled_dpll_state(dev_priv);
9767 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
9769 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9771 struct drm_display_mode adjusted_mode =
9772 crtc_state->hw.adjusted_mode;
9774 if (crtc_state->vrr.enable) {
9775 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
9776 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
9777 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
9778 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
9781 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
9783 crtc->mode_flags = crtc_state->mode_flags;
9786 * The scanline counter increments at the leading edge of hsync.
9788 * On most platforms it starts counting from vtotal-1 on the
9789 * first active line. That means the scanline counter value is
9790 * always one less than what we would expect. Ie. just after
9791 * start of vblank, which also occurs at start of hsync (on the
9792 * last active line), the scanline counter will read vblank_start-1.
9794 * On gen2 the scanline counter starts counting from 1 instead
9795 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
9796 * to keep the value positive), instead of adding one.
9798 * On HSW+ the behaviour of the scanline counter depends on the output
9799 * type. For DP ports it behaves like most other platforms, but on HDMI
9800 * there's an extra 1 line difference. So we need to add two instead of
9803 * On VLV/CHV DSI the scanline counter would appear to increment
9804 * approx. 1/3 of a scanline before start of vblank. Unfortunately
9805 * that means we can't tell whether we're in vblank or not while
9806 * we're on that particular line. We must still set scanline_offset
9807 * to 1 so that the vblank timestamps come out correct when we query
9808 * the scanline counter from within the vblank interrupt handler.
9809 * However if queried just before the start of vblank we'll get an
9810 * answer that's slightly in the future.
9812 if (IS_GEN(dev_priv, 2)) {
9815 vtotal = adjusted_mode.crtc_vtotal;
9816 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9819 crtc->scanline_offset = vtotal - 1;
9820 } else if (HAS_DDI(dev_priv) &&
9821 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
9822 crtc->scanline_offset = 2;
9824 crtc->scanline_offset = 1;
9828 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9830 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9831 struct intel_crtc_state *new_crtc_state;
9832 struct intel_crtc *crtc;
9835 if (!dev_priv->display.crtc_compute_clock)
9838 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9839 if (!intel_crtc_needs_modeset(new_crtc_state))
9842 intel_release_shared_dplls(state, crtc);
9847 * This implements the workaround described in the "notes" section of the mode
9848 * set sequence documentation. When going from no pipes or single pipe to
9849 * multiple pipes, and planes are enabled after the pipe, we need to wait at
9850 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
9852 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
9854 struct intel_crtc_state *crtc_state;
9855 struct intel_crtc *crtc;
9856 struct intel_crtc_state *first_crtc_state = NULL;
9857 struct intel_crtc_state *other_crtc_state = NULL;
9858 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
9861 /* look at all crtc's that are going to be enabled in during modeset */
9862 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9863 if (!crtc_state->hw.active ||
9864 !intel_crtc_needs_modeset(crtc_state))
9867 if (first_crtc_state) {
9868 other_crtc_state = crtc_state;
9871 first_crtc_state = crtc_state;
9872 first_pipe = crtc->pipe;
9876 /* No workaround needed? */
9877 if (!first_crtc_state)
9880 /* w/a possibly needed, check how many crtc's are already enabled. */
9881 for_each_intel_crtc(state->base.dev, crtc) {
9882 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9883 if (IS_ERR(crtc_state))
9884 return PTR_ERR(crtc_state);
9886 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
9888 if (!crtc_state->hw.active ||
9889 intel_crtc_needs_modeset(crtc_state))
9892 /* 2 or more enabled crtcs means no need for w/a */
9893 if (enabled_pipe != INVALID_PIPE)
9896 enabled_pipe = crtc->pipe;
9899 if (enabled_pipe != INVALID_PIPE)
9900 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
9901 else if (other_crtc_state)
9902 other_crtc_state->hsw_workaround_pipe = first_pipe;
9907 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9910 const struct intel_crtc_state *crtc_state;
9911 struct intel_crtc *crtc;
9914 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9915 if (crtc_state->hw.active)
9916 active_pipes |= BIT(crtc->pipe);
9918 active_pipes &= ~BIT(crtc->pipe);
9921 return active_pipes;
9924 static int intel_modeset_checks(struct intel_atomic_state *state)
9926 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9928 state->modeset = true;
9930 if (IS_HASWELL(dev_priv))
9931 return hsw_mode_set_planes_workaround(state);
9937 * Handle calculation of various watermark data at the end of the atomic check
9938 * phase. The code here should be run after the per-crtc and per-plane 'check'
9939 * handlers to ensure that all derived state has been updated.
9941 static int calc_watermark_data(struct intel_atomic_state *state)
9943 struct drm_device *dev = state->base.dev;
9944 struct drm_i915_private *dev_priv = to_i915(dev);
9946 /* Is there platform-specific watermark information to calculate? */
9947 if (dev_priv->display.compute_global_watermarks)
9948 return dev_priv->display.compute_global_watermarks(state);
9953 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9954 struct intel_crtc_state *new_crtc_state)
9956 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9959 new_crtc_state->uapi.mode_changed = false;
9960 new_crtc_state->update_pipe = true;
9963 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9964 struct intel_crtc_state *new_crtc_state)
9967 * If we're not doing the full modeset we want to
9968 * keep the current M/N values as they may be
9969 * sufficiently different to the computed values
9970 * to cause problems.
9972 * FIXME: should really copy more fuzzy state here
9974 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9975 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9976 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9977 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9980 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9981 struct intel_crtc *crtc,
9984 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9985 struct intel_plane *plane;
9987 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9988 struct intel_plane_state *plane_state;
9990 if ((plane_ids_mask & BIT(plane->id)) == 0)
9993 plane_state = intel_atomic_get_plane_state(state, plane);
9994 if (IS_ERR(plane_state))
9995 return PTR_ERR(plane_state);
10001 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
10002 struct intel_crtc *crtc)
10004 const struct intel_crtc_state *old_crtc_state =
10005 intel_atomic_get_old_crtc_state(state, crtc);
10006 const struct intel_crtc_state *new_crtc_state =
10007 intel_atomic_get_new_crtc_state(state, crtc);
10009 return intel_crtc_add_planes_to_state(state, crtc,
10010 old_crtc_state->enabled_planes |
10011 new_crtc_state->enabled_planes);
10014 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
10016 /* See {hsw,vlv,ivb}_plane_ratio() */
10017 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
10018 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10019 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
10022 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
10023 struct intel_crtc *crtc,
10024 struct intel_crtc *other)
10026 const struct intel_plane_state *plane_state;
10027 struct intel_plane *plane;
10031 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10032 if (plane->pipe == crtc->pipe)
10033 plane_ids |= BIT(plane->id);
10036 return intel_crtc_add_planes_to_state(state, other, plane_ids);
10039 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
10041 const struct intel_crtc_state *crtc_state;
10042 struct intel_crtc *crtc;
10045 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10048 if (!crtc_state->bigjoiner)
10051 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
10052 crtc_state->bigjoiner_linked_crtc);
10060 static int intel_atomic_check_planes(struct intel_atomic_state *state)
10062 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10063 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10064 struct intel_plane_state *plane_state;
10065 struct intel_plane *plane;
10066 struct intel_crtc *crtc;
10069 ret = icl_add_linked_planes(state);
10073 ret = intel_bigjoiner_add_affected_planes(state);
10077 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10078 ret = intel_plane_atomic_check(state, plane);
10080 drm_dbg_atomic(&dev_priv->drm,
10081 "[PLANE:%d:%s] atomic driver check failed\n",
10082 plane->base.base.id, plane->base.name);
10087 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10088 new_crtc_state, i) {
10089 u8 old_active_planes, new_active_planes;
10091 ret = icl_check_nv12_planes(new_crtc_state);
10096 * On some platforms the number of active planes affects
10097 * the planes' minimum cdclk calculation. Add such planes
10098 * to the state before we compute the minimum cdclk.
10100 if (!active_planes_affects_min_cdclk(dev_priv))
10103 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
10104 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
10107 * Not only the number of planes, but if the plane configuration had
10108 * changed might already mean we need to recompute min CDCLK,
10109 * because different planes might consume different amount of Dbuf bandwidth
10110 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
10112 if (old_active_planes == new_active_planes)
10115 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
10123 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
10124 bool *need_cdclk_calc)
10126 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10127 const struct intel_cdclk_state *old_cdclk_state;
10128 const struct intel_cdclk_state *new_cdclk_state;
10129 struct intel_plane_state *plane_state;
10130 struct intel_bw_state *new_bw_state;
10131 struct intel_plane *plane;
10137 * active_planes bitmask has been updated, and potentially
10138 * affected planes are part of the state. We can now
10139 * compute the minimum cdclk for each plane.
10141 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10142 ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
10147 old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
10148 new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
10150 if (new_cdclk_state &&
10151 old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
10152 *need_cdclk_calc = true;
10154 ret = dev_priv->display.bw_calc_min_cdclk(state);
10158 new_bw_state = intel_atomic_get_new_bw_state(state);
10160 if (!new_cdclk_state || !new_bw_state)
10163 for_each_pipe(dev_priv, pipe) {
10164 min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
10167 * Currently do this change only if we need to increase
10169 if (new_bw_state->min_cdclk > min_cdclk)
10170 *need_cdclk_calc = true;
10176 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
10178 struct intel_crtc_state *crtc_state;
10179 struct intel_crtc *crtc;
10182 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10183 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
10186 ret = intel_crtc_atomic_check(state, crtc);
10188 drm_dbg_atomic(&i915->drm,
10189 "[CRTC:%d:%s] atomic driver check failed\n",
10190 crtc->base.base.id, crtc->base.name);
10198 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
10201 const struct intel_crtc_state *new_crtc_state;
10202 struct intel_crtc *crtc;
10205 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10206 if (new_crtc_state->hw.enable &&
10207 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
10208 intel_crtc_needs_modeset(new_crtc_state))
10215 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
10216 struct intel_crtc *crtc,
10217 struct intel_crtc_state *old_crtc_state,
10218 struct intel_crtc_state *new_crtc_state)
10220 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10221 struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
10222 struct intel_crtc *slave, *master;
10224 /* slave being enabled, is master is still claiming this crtc? */
10225 if (old_crtc_state->bigjoiner_slave) {
10227 master = old_crtc_state->bigjoiner_linked_crtc;
10228 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
10229 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
10233 if (!new_crtc_state->bigjoiner)
10236 if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
10237 DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
10238 "CRTC + 1 to be used, doesn't exist\n",
10239 crtc->base.base.id, crtc->base.name);
10243 slave = new_crtc_state->bigjoiner_linked_crtc =
10244 intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
10245 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
10247 if (IS_ERR(slave_crtc_state))
10248 return PTR_ERR(slave_crtc_state);
10250 /* master being enabled, slave was already configured? */
10251 if (slave_crtc_state->uapi.enable)
10254 DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
10255 slave->base.base.id, slave->base.name);
10257 return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
10260 DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
10261 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
10262 slave->base.base.id, slave->base.name,
10263 master->base.base.id, master->base.name);
10267 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
10268 struct intel_crtc_state *master_crtc_state)
10270 struct intel_crtc_state *slave_crtc_state =
10271 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
10273 slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
10274 slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
10275 slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
10276 intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
10280 * DOC: asynchronous flip implementation
10282 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
10283 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
10284 * Correspondingly, support is currently added for primary plane only.
10286 * Async flip can only change the plane surface address, so anything else
10287 * changing is rejected from the intel_atomic_check_async() function.
10288 * Once this check is cleared, flip done interrupt is enabled using
10289 * the intel_crtc_enable_flip_done() function.
10291 * As soon as the surface address register is written, flip done interrupt is
10292 * generated and the requested events are sent to the usersapce in the interrupt
10293 * handler itself. The timestamp and sequence sent during the flip done event
10294 * correspond to the last vblank and have no relation to the actual time when
10295 * the flip done event was sent.
10297 static int intel_atomic_check_async(struct intel_atomic_state *state)
10299 struct drm_i915_private *i915 = to_i915(state->base.dev);
10300 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10301 const struct intel_plane_state *new_plane_state, *old_plane_state;
10302 struct intel_crtc *crtc;
10303 struct intel_plane *plane;
10306 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10307 new_crtc_state, i) {
10308 if (intel_crtc_needs_modeset(new_crtc_state)) {
10309 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
10313 if (!new_crtc_state->hw.active) {
10314 drm_dbg_kms(&i915->drm, "CRTC inactive\n");
10317 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
10318 drm_dbg_kms(&i915->drm,
10319 "Active planes cannot be changed during async flip\n");
10324 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10325 new_plane_state, i) {
10327 * TODO: Async flip is only supported through the page flip IOCTL
10328 * as of now. So support currently added for primary plane only.
10329 * Support for other planes on platforms on which supports
10330 * this(vlv/chv and icl+) should be added when async flip is
10331 * enabled in the atomic IOCTL path.
10333 if (!plane->async_flip)
10337 * FIXME: This check is kept generic for all platforms.
10338 * Need to verify this for all gen9 and gen10 platforms to enable
10339 * this selectively if required.
10341 switch (new_plane_state->hw.fb->modifier) {
10342 case I915_FORMAT_MOD_X_TILED:
10343 case I915_FORMAT_MOD_Y_TILED:
10344 case I915_FORMAT_MOD_Yf_TILED:
10347 drm_dbg_kms(&i915->drm,
10348 "Linear memory/CCS does not support async flips\n");
10352 if (old_plane_state->color_plane[0].stride !=
10353 new_plane_state->color_plane[0].stride) {
10354 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
10358 if (old_plane_state->hw.fb->modifier !=
10359 new_plane_state->hw.fb->modifier) {
10360 drm_dbg_kms(&i915->drm,
10361 "Framebuffer modifiers cannot be changed in async flip\n");
10365 if (old_plane_state->hw.fb->format !=
10366 new_plane_state->hw.fb->format) {
10367 drm_dbg_kms(&i915->drm,
10368 "Framebuffer format cannot be changed in async flip\n");
10372 if (old_plane_state->hw.rotation !=
10373 new_plane_state->hw.rotation) {
10374 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
10378 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
10379 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
10380 drm_dbg_kms(&i915->drm,
10381 "Plane size/co-ordinates cannot be changed in async flip\n");
10385 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
10386 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
10390 if (old_plane_state->hw.pixel_blend_mode !=
10391 new_plane_state->hw.pixel_blend_mode) {
10392 drm_dbg_kms(&i915->drm,
10393 "Pixel blend mode cannot be changed in async flip\n");
10397 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
10398 drm_dbg_kms(&i915->drm,
10399 "Color encoding cannot be changed in async flip\n");
10403 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
10404 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
10412 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
10414 struct intel_crtc_state *crtc_state;
10415 struct intel_crtc *crtc;
10418 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10419 struct intel_crtc_state *linked_crtc_state;
10420 struct intel_crtc *linked_crtc;
10423 if (!crtc_state->bigjoiner)
10426 linked_crtc = crtc_state->bigjoiner_linked_crtc;
10427 linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
10428 if (IS_ERR(linked_crtc_state))
10429 return PTR_ERR(linked_crtc_state);
10431 if (!intel_crtc_needs_modeset(crtc_state))
10434 linked_crtc_state->uapi.mode_changed = true;
10436 ret = drm_atomic_add_affected_connectors(&state->base,
10437 &linked_crtc->base);
10441 ret = intel_atomic_add_affected_planes(state, linked_crtc);
10446 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10447 /* Kill old bigjoiner link, we may re-establish afterwards */
10448 if (intel_crtc_needs_modeset(crtc_state) &&
10449 crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
10450 kill_bigjoiner_slave(state, crtc_state);
10457 * intel_atomic_check - validate state object
10459 * @_state: state to validate
10461 static int intel_atomic_check(struct drm_device *dev,
10462 struct drm_atomic_state *_state)
10464 struct drm_i915_private *dev_priv = to_i915(dev);
10465 struct intel_atomic_state *state = to_intel_atomic_state(_state);
10466 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10467 struct intel_crtc *crtc;
10469 bool any_ms = false;
10471 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10472 new_crtc_state, i) {
10473 if (new_crtc_state->inherited != old_crtc_state->inherited)
10474 new_crtc_state->uapi.mode_changed = true;
10477 intel_vrr_check_modeset(state);
10479 ret = drm_atomic_helper_check_modeset(dev, &state->base);
10483 ret = intel_bigjoiner_add_affected_crtcs(state);
10487 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10488 new_crtc_state, i) {
10489 if (!intel_crtc_needs_modeset(new_crtc_state)) {
10491 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
10496 if (!new_crtc_state->uapi.enable) {
10497 if (!new_crtc_state->bigjoiner_slave) {
10498 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
10504 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
10508 ret = intel_modeset_pipe_config(state, new_crtc_state);
10512 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
10518 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10519 new_crtc_state, i) {
10520 if (!intel_crtc_needs_modeset(new_crtc_state))
10523 ret = intel_modeset_pipe_config_late(new_crtc_state);
10527 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
10531 * Check if fastset is allowed by external dependencies like other
10532 * pipes and transcoders.
10534 * Right now it only forces a fullmodeset when the MST master
10535 * transcoder did not changed but the pipe of the master transcoder
10536 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
10537 * in case of port synced crtcs, if one of the synced crtcs
10538 * needs a full modeset, all other synced crtcs should be
10539 * forced a full modeset.
10541 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10542 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
10545 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
10546 enum transcoder master = new_crtc_state->mst_master_transcoder;
10548 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
10549 new_crtc_state->uapi.mode_changed = true;
10550 new_crtc_state->update_pipe = false;
10554 if (is_trans_port_sync_mode(new_crtc_state)) {
10555 u8 trans = new_crtc_state->sync_mode_slaves_mask;
10557 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
10558 trans |= BIT(new_crtc_state->master_transcoder);
10560 if (intel_cpu_transcoders_need_modeset(state, trans)) {
10561 new_crtc_state->uapi.mode_changed = true;
10562 new_crtc_state->update_pipe = false;
10566 if (new_crtc_state->bigjoiner) {
10567 struct intel_crtc_state *linked_crtc_state =
10568 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
10570 if (intel_crtc_needs_modeset(linked_crtc_state)) {
10571 new_crtc_state->uapi.mode_changed = true;
10572 new_crtc_state->update_pipe = false;
10577 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10578 new_crtc_state, i) {
10579 if (intel_crtc_needs_modeset(new_crtc_state)) {
10584 if (!new_crtc_state->update_pipe)
10587 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
10590 if (any_ms && !check_digital_port_conflicts(state)) {
10591 drm_dbg_kms(&dev_priv->drm,
10592 "rejecting conflicting digital port configuration\n");
10597 ret = drm_dp_mst_atomic_check(&state->base);
10601 ret = intel_atomic_check_planes(state);
10605 intel_fbc_choose_crtc(dev_priv, state);
10606 ret = calc_watermark_data(state);
10610 ret = intel_bw_atomic_check(state);
10614 ret = intel_atomic_check_cdclk(state, &any_ms);
10619 ret = intel_modeset_checks(state);
10623 ret = intel_modeset_calc_cdclk(state);
10627 intel_modeset_clear_plls(state);
10630 ret = intel_atomic_check_crtcs(state);
10634 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10635 new_crtc_state, i) {
10636 if (new_crtc_state->uapi.async_flip) {
10637 ret = intel_atomic_check_async(state);
10642 if (!intel_crtc_needs_modeset(new_crtc_state) &&
10643 !new_crtc_state->update_pipe)
10646 intel_dump_pipe_config(new_crtc_state, state,
10647 intel_crtc_needs_modeset(new_crtc_state) ?
10648 "[modeset]" : "[fastset]");
10654 if (ret == -EDEADLK)
10658 * FIXME would probably be nice to know which crtc specifically
10659 * caused the failure, in cases where we can pinpoint it.
10661 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10663 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
10668 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10670 struct intel_crtc_state *crtc_state;
10671 struct intel_crtc *crtc;
10674 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10678 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10679 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10681 if (mode_changed || crtc_state->update_pipe ||
10682 crtc_state->uapi.color_mgmt_changed) {
10683 intel_dsb_prepare(crtc_state);
10690 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10691 struct intel_crtc_state *crtc_state)
10693 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10695 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
10696 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10698 if (crtc_state->has_pch_encoder) {
10699 enum pipe pch_transcoder =
10700 intel_crtc_pch_transcoder(crtc);
10702 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10706 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
10707 const struct intel_crtc_state *new_crtc_state)
10709 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
10710 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10713 * Update pipe size and adjust fitter if needed: the reason for this is
10714 * that in compute_mode_changes we check the native mode (not the pfit
10715 * mode) to see if we can flip rather than do a full mode set. In the
10716 * fastboot case, we'll flip, but if we don't update the pipesrc and
10717 * pfit state, we'll end up with a big fb scanned out into the wrong
10720 intel_set_pipe_src_size(new_crtc_state);
10722 /* on skylake this is done by detaching scalers */
10723 if (INTEL_GEN(dev_priv) >= 9) {
10724 skl_detach_scalers(new_crtc_state);
10726 if (new_crtc_state->pch_pfit.enabled)
10727 skl_pfit_enable(new_crtc_state);
10728 } else if (HAS_PCH_SPLIT(dev_priv)) {
10729 if (new_crtc_state->pch_pfit.enabled)
10730 ilk_pfit_enable(new_crtc_state);
10731 else if (old_crtc_state->pch_pfit.enabled)
10732 ilk_pfit_disable(old_crtc_state);
10736 * The register is supposedly single buffered so perhaps
10737 * not 100% correct to do this here. But SKL+ calculate
10738 * this based on the adjust pixel rate so pfit changes do
10739 * affect it and so it must be updated for fastsets.
10740 * HSW/BDW only really need this here for fastboot, after
10741 * that the value should not change without a full modeset.
10743 if (INTEL_GEN(dev_priv) >= 9 ||
10744 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
10745 hsw_set_linetime_wm(new_crtc_state);
10747 if (INTEL_GEN(dev_priv) >= 11)
10748 icl_set_pipe_chicken(crtc);
10751 static void commit_pipe_config(struct intel_atomic_state *state,
10752 struct intel_crtc *crtc)
10754 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10755 const struct intel_crtc_state *old_crtc_state =
10756 intel_atomic_get_old_crtc_state(state, crtc);
10757 const struct intel_crtc_state *new_crtc_state =
10758 intel_atomic_get_new_crtc_state(state, crtc);
10759 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10762 * During modesets pipe configuration was programmed as the
10763 * CRTC was enabled.
10766 if (new_crtc_state->uapi.color_mgmt_changed ||
10767 new_crtc_state->update_pipe)
10768 intel_color_commit(new_crtc_state);
10770 if (INTEL_GEN(dev_priv) >= 9)
10771 skl_detach_scalers(new_crtc_state);
10773 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
10774 bdw_set_pipemisc(new_crtc_state);
10776 if (new_crtc_state->update_pipe)
10777 intel_pipe_fastset(old_crtc_state, new_crtc_state);
10779 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
10782 if (dev_priv->display.atomic_update_watermarks)
10783 dev_priv->display.atomic_update_watermarks(state, crtc);
10786 static void intel_enable_crtc(struct intel_atomic_state *state,
10787 struct intel_crtc *crtc)
10789 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10790 const struct intel_crtc_state *new_crtc_state =
10791 intel_atomic_get_new_crtc_state(state, crtc);
10793 if (!intel_crtc_needs_modeset(new_crtc_state))
10796 intel_crtc_update_active_timings(new_crtc_state);
10798 dev_priv->display.crtc_enable(state, crtc);
10800 if (new_crtc_state->bigjoiner_slave)
10803 /* vblanks work again, re-enable pipe CRC. */
10804 intel_crtc_enable_pipe_crc(crtc);
10807 static void intel_update_crtc(struct intel_atomic_state *state,
10808 struct intel_crtc *crtc)
10810 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10811 const struct intel_crtc_state *old_crtc_state =
10812 intel_atomic_get_old_crtc_state(state, crtc);
10813 struct intel_crtc_state *new_crtc_state =
10814 intel_atomic_get_new_crtc_state(state, crtc);
10815 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10818 if (new_crtc_state->preload_luts &&
10819 (new_crtc_state->uapi.color_mgmt_changed ||
10820 new_crtc_state->update_pipe))
10821 intel_color_load_luts(new_crtc_state);
10823 intel_pre_plane_update(state, crtc);
10825 if (new_crtc_state->update_pipe)
10826 intel_encoders_update_pipe(state, crtc);
10829 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
10830 intel_fbc_disable(crtc);
10832 intel_fbc_enable(state, crtc);
10834 /* Perform vblank evasion around commit operation */
10835 intel_pipe_update_start(new_crtc_state);
10837 commit_pipe_config(state, crtc);
10839 if (INTEL_GEN(dev_priv) >= 9)
10840 skl_update_planes_on_crtc(state, crtc);
10842 i9xx_update_planes_on_crtc(state, crtc);
10844 intel_pipe_update_end(new_crtc_state);
10847 * We usually enable FIFO underrun interrupts as part of the
10848 * CRTC enable sequence during modesets. But when we inherit a
10849 * valid pipe configuration from the BIOS we need to take care
10850 * of enabling them on the CRTC's first fastset.
10852 if (new_crtc_state->update_pipe && !modeset &&
10853 old_crtc_state->inherited)
10854 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
10857 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
10858 struct intel_crtc_state *old_crtc_state,
10859 struct intel_crtc_state *new_crtc_state,
10860 struct intel_crtc *crtc)
10862 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10864 drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
10866 intel_crtc_disable_planes(state, crtc);
10869 * We still need special handling for disabling bigjoiner master
10870 * and slaves since for slave we do not have encoder or plls
10871 * so we dont need to disable those.
10873 if (old_crtc_state->bigjoiner) {
10874 intel_crtc_disable_planes(state,
10875 old_crtc_state->bigjoiner_linked_crtc);
10876 old_crtc_state->bigjoiner_linked_crtc->active = false;
10880 * We need to disable pipe CRC before disabling the pipe,
10881 * or we race against vblank off.
10883 intel_crtc_disable_pipe_crc(crtc);
10885 dev_priv->display.crtc_disable(state, crtc);
10886 crtc->active = false;
10887 intel_fbc_disable(crtc);
10888 intel_disable_shared_dpll(old_crtc_state);
10890 /* FIXME unify this for all platforms */
10891 if (!new_crtc_state->hw.active &&
10892 !HAS_GMCH(dev_priv) &&
10893 dev_priv->display.initial_watermarks)
10894 dev_priv->display.initial_watermarks(state, crtc);
10897 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
10899 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10900 struct intel_crtc *crtc;
10904 /* Only disable port sync and MST slaves */
10905 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10906 new_crtc_state, i) {
10907 if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
10910 if (!old_crtc_state->hw.active)
10913 /* In case of Transcoder port Sync master slave CRTCs can be
10914 * assigned in any order and we need to make sure that
10915 * slave CRTCs are disabled first and then master CRTC since
10916 * Slave vblanks are masked till Master Vblanks.
10918 if (!is_trans_port_sync_slave(old_crtc_state) &&
10919 !intel_dp_mst_is_slave_trans(old_crtc_state))
10922 intel_pre_plane_update(state, crtc);
10923 intel_old_crtc_state_disables(state, old_crtc_state,
10924 new_crtc_state, crtc);
10925 handled |= BIT(crtc->pipe);
10928 /* Disable everything else left on */
10929 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10930 new_crtc_state, i) {
10931 if (!intel_crtc_needs_modeset(new_crtc_state) ||
10932 (handled & BIT(crtc->pipe)) ||
10933 old_crtc_state->bigjoiner_slave)
10936 intel_pre_plane_update(state, crtc);
10937 if (old_crtc_state->bigjoiner) {
10938 struct intel_crtc *slave =
10939 old_crtc_state->bigjoiner_linked_crtc;
10941 intel_pre_plane_update(state, slave);
10944 if (old_crtc_state->hw.active)
10945 intel_old_crtc_state_disables(state, old_crtc_state,
10946 new_crtc_state, crtc);
10950 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10952 struct intel_crtc_state *new_crtc_state;
10953 struct intel_crtc *crtc;
10956 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10957 if (!new_crtc_state->hw.active)
10960 intel_enable_crtc(state, crtc);
10961 intel_update_crtc(state, crtc);
10965 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
10967 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10968 struct intel_crtc *crtc;
10969 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10970 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
10971 u8 update_pipes = 0, modeset_pipes = 0;
10974 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10975 enum pipe pipe = crtc->pipe;
10977 if (!new_crtc_state->hw.active)
10980 /* ignore allocations for crtc's that have been turned off. */
10981 if (!intel_crtc_needs_modeset(new_crtc_state)) {
10982 entries[pipe] = old_crtc_state->wm.skl.ddb;
10983 update_pipes |= BIT(pipe);
10985 modeset_pipes |= BIT(pipe);
10990 * Whenever the number of active pipes changes, we need to make sure we
10991 * update the pipes in the right order so that their ddb allocations
10992 * never overlap with each other between CRTC updates. Otherwise we'll
10993 * cause pipe underruns and other bad stuff.
10995 * So first lets enable all pipes that do not need a fullmodeset as
10996 * those don't have any external dependency.
10998 while (update_pipes) {
10999 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11000 new_crtc_state, i) {
11001 enum pipe pipe = crtc->pipe;
11003 if ((update_pipes & BIT(pipe)) == 0)
11006 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
11007 entries, I915_MAX_PIPES, pipe))
11010 entries[pipe] = new_crtc_state->wm.skl.ddb;
11011 update_pipes &= ~BIT(pipe);
11013 intel_update_crtc(state, crtc);
11016 * If this is an already active pipe, it's DDB changed,
11017 * and this isn't the last pipe that needs updating
11018 * then we need to wait for a vblank to pass for the
11019 * new ddb allocation to take effect.
11021 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
11022 &old_crtc_state->wm.skl.ddb) &&
11023 (update_pipes | modeset_pipes))
11024 intel_wait_for_vblank(dev_priv, pipe);
11028 update_pipes = modeset_pipes;
11031 * Enable all pipes that needs a modeset and do not depends on other
11034 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11035 enum pipe pipe = crtc->pipe;
11037 if ((modeset_pipes & BIT(pipe)) == 0)
11040 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
11041 is_trans_port_sync_master(new_crtc_state) ||
11042 (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
11045 modeset_pipes &= ~BIT(pipe);
11047 intel_enable_crtc(state, crtc);
11051 * Then we enable all remaining pipes that depend on other
11052 * pipes: MST slaves and port sync masters, big joiner master
11054 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11055 enum pipe pipe = crtc->pipe;
11057 if ((modeset_pipes & BIT(pipe)) == 0)
11060 modeset_pipes &= ~BIT(pipe);
11062 intel_enable_crtc(state, crtc);
11066 * Finally we do the plane updates/etc. for all pipes that got enabled.
11068 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11069 enum pipe pipe = crtc->pipe;
11071 if ((update_pipes & BIT(pipe)) == 0)
11074 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
11075 entries, I915_MAX_PIPES, pipe));
11077 entries[pipe] = new_crtc_state->wm.skl.ddb;
11078 update_pipes &= ~BIT(pipe);
11080 intel_update_crtc(state, crtc);
11083 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
11084 drm_WARN_ON(&dev_priv->drm, update_pipes);
11087 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
11089 struct intel_atomic_state *state, *next;
11090 struct llist_node *freed;
11092 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
11093 llist_for_each_entry_safe(state, next, freed, freed)
11094 drm_atomic_state_put(&state->base);
11097 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
11099 struct drm_i915_private *dev_priv =
11100 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
11102 intel_atomic_helper_free_state(dev_priv);
11105 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
11107 struct wait_queue_entry wait_fence, wait_reset;
11108 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
11110 init_wait_entry(&wait_fence, 0);
11111 init_wait_entry(&wait_reset, 0);
11113 prepare_to_wait(&intel_state->commit_ready.wait,
11114 &wait_fence, TASK_UNINTERRUPTIBLE);
11115 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
11116 I915_RESET_MODESET),
11117 &wait_reset, TASK_UNINTERRUPTIBLE);
11120 if (i915_sw_fence_done(&intel_state->commit_ready) ||
11121 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
11126 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
11127 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
11128 I915_RESET_MODESET),
11132 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
11134 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
11135 struct intel_crtc *crtc;
11138 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11140 intel_dsb_cleanup(old_crtc_state);
11143 static void intel_atomic_cleanup_work(struct work_struct *work)
11145 struct intel_atomic_state *state =
11146 container_of(work, struct intel_atomic_state, base.commit_work);
11147 struct drm_i915_private *i915 = to_i915(state->base.dev);
11149 intel_cleanup_dsbs(state);
11150 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
11151 drm_atomic_helper_commit_cleanup_done(&state->base);
11152 drm_atomic_state_put(&state->base);
11154 intel_atomic_helper_free_state(i915);
11157 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
11159 struct drm_i915_private *i915 = to_i915(state->base.dev);
11160 struct intel_plane *plane;
11161 struct intel_plane_state *plane_state;
11164 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11165 struct drm_framebuffer *fb = plane_state->hw.fb;
11169 fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
11173 * The layout of the fast clear color value expected by HW
11174 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
11175 * - 4 x 4 bytes per-channel value
11176 * (in surface type specific float/int format provided by the fb user)
11177 * - 8 bytes native color value used by the display
11178 * (converted/written by GPU during a fast clear operation using the
11179 * above per-channel values)
11181 * The commit's FB prepare hook already ensured that FB obj is pinned and the
11182 * caller made sure that the object is synced wrt. the related color clear value
11185 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
11186 fb->offsets[2] + 16,
11187 &plane_state->ccval,
11188 sizeof(plane_state->ccval));
11189 /* The above could only fail if the FB obj has an unexpected backing store type. */
11190 drm_WARN_ON(&i915->drm, ret);
11194 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
11196 struct drm_device *dev = state->base.dev;
11197 struct drm_i915_private *dev_priv = to_i915(dev);
11198 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
11199 struct intel_crtc *crtc;
11200 u64 put_domains[I915_MAX_PIPES] = {};
11201 intel_wakeref_t wakeref = 0;
11204 intel_atomic_commit_fence_wait(state);
11206 drm_atomic_helper_wait_for_dependencies(&state->base);
11208 if (state->modeset)
11209 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
11211 intel_atomic_prepare_plane_clear_colors(state);
11213 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11214 new_crtc_state, i) {
11215 if (intel_crtc_needs_modeset(new_crtc_state) ||
11216 new_crtc_state->update_pipe) {
11218 put_domains[crtc->pipe] =
11219 modeset_get_crtc_power_domains(new_crtc_state);
11223 intel_commit_modeset_disables(state);
11225 /* FIXME: Eventually get rid of our crtc->config pointer */
11226 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
11227 crtc->config = new_crtc_state;
11229 if (state->modeset) {
11230 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
11232 intel_set_cdclk_pre_plane_update(state);
11234 intel_modeset_verify_disabled(dev_priv, state);
11237 intel_sagv_pre_plane_update(state);
11239 /* Complete the events for pipes that have now been disabled */
11240 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11241 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
11243 /* Complete events for now disable pipes here. */
11244 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
11245 spin_lock_irq(&dev->event_lock);
11246 drm_crtc_send_vblank_event(&crtc->base,
11247 new_crtc_state->uapi.event);
11248 spin_unlock_irq(&dev->event_lock);
11250 new_crtc_state->uapi.event = NULL;
11254 if (state->modeset)
11255 intel_encoders_update_prepare(state);
11257 intel_dbuf_pre_plane_update(state);
11259 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11260 if (new_crtc_state->uapi.async_flip)
11261 intel_crtc_enable_flip_done(state, crtc);
11264 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
11265 dev_priv->display.commit_modeset_enables(state);
11267 if (state->modeset) {
11268 intel_encoders_update_complete(state);
11270 intel_set_cdclk_post_plane_update(state);
11273 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
11274 * already, but still need the state for the delayed optimization. To
11276 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
11277 * - schedule that vblank worker _before_ calling hw_done
11278 * - at the start of commit_tail, cancel it _synchrously
11279 * - switch over to the vblank wait helper in the core after that since
11280 * we don't need out special handling any more.
11282 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
11284 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11285 if (new_crtc_state->uapi.async_flip)
11286 intel_crtc_disable_flip_done(state, crtc);
11288 if (new_crtc_state->hw.active &&
11289 !intel_crtc_needs_modeset(new_crtc_state) &&
11290 !new_crtc_state->preload_luts &&
11291 (new_crtc_state->uapi.color_mgmt_changed ||
11292 new_crtc_state->update_pipe))
11293 intel_color_load_luts(new_crtc_state);
11297 * Now that the vblank has passed, we can go ahead and program the
11298 * optimal watermarks on platforms that need two-step watermark
11301 * TODO: Move this (and other cleanup) to an async worker eventually.
11303 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11304 new_crtc_state, i) {
11306 * Gen2 reports pipe underruns whenever all planes are disabled.
11307 * So re-enable underrun reporting after some planes get enabled.
11309 * We do this before .optimize_watermarks() so that we have a
11310 * chance of catching underruns with the intermediate watermarks
11311 * vs. the new plane configuration.
11313 if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
11314 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
11316 if (dev_priv->display.optimize_watermarks)
11317 dev_priv->display.optimize_watermarks(state, crtc);
11320 intel_dbuf_post_plane_update(state);
11322 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11323 intel_post_plane_update(state, crtc);
11325 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
11327 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
11330 * DSB cleanup is done in cleanup_work aligning with framebuffer
11331 * cleanup. So copy and reset the dsb structure to sync with
11332 * commit_done and later do dsb cleanup in cleanup_work.
11334 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
11337 /* Underruns don't always raise interrupts, so check manually */
11338 intel_check_cpu_fifo_underruns(dev_priv);
11339 intel_check_pch_fifo_underruns(dev_priv);
11341 if (state->modeset)
11342 intel_verify_planes(state);
11344 intel_sagv_post_plane_update(state);
11346 drm_atomic_helper_commit_hw_done(&state->base);
11348 if (state->modeset) {
11349 /* As one of the primary mmio accessors, KMS has a high
11350 * likelihood of triggering bugs in unclaimed access. After we
11351 * finish modesetting, see if an error has been flagged, and if
11352 * so enable debugging for the next modeset - and hope we catch
11355 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
11356 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
11358 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11361 * Defer the cleanup of the old state to a separate worker to not
11362 * impede the current task (userspace for blocking modesets) that
11363 * are executed inline. For out-of-line asynchronous modesets/flips,
11364 * deferring to a new worker seems overkill, but we would place a
11365 * schedule point (cond_resched()) here anyway to keep latencies
11368 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
11369 queue_work(system_highpri_wq, &state->base.commit_work);
11372 static void intel_atomic_commit_work(struct work_struct *work)
11374 struct intel_atomic_state *state =
11375 container_of(work, struct intel_atomic_state, base.commit_work);
11377 intel_atomic_commit_tail(state);
11380 static int __i915_sw_fence_call
11381 intel_atomic_commit_ready(struct i915_sw_fence *fence,
11382 enum i915_sw_fence_notify notify)
11384 struct intel_atomic_state *state =
11385 container_of(fence, struct intel_atomic_state, commit_ready);
11388 case FENCE_COMPLETE:
11389 /* we do blocking waits in the worker, nothing to do here */
11393 struct intel_atomic_helper *helper =
11394 &to_i915(state->base.dev)->atomic_helper;
11396 if (llist_add(&state->freed, &helper->free_list))
11397 schedule_work(&helper->free_work);
11402 return NOTIFY_DONE;
11405 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
11407 struct intel_plane_state *old_plane_state, *new_plane_state;
11408 struct intel_plane *plane;
11411 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
11412 new_plane_state, i)
11413 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
11414 to_intel_frontbuffer(new_plane_state->hw.fb),
11415 plane->frontbuffer_bit);
11418 static int intel_atomic_commit(struct drm_device *dev,
11419 struct drm_atomic_state *_state,
11422 struct intel_atomic_state *state = to_intel_atomic_state(_state);
11423 struct drm_i915_private *dev_priv = to_i915(dev);
11426 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
11428 drm_atomic_state_get(&state->base);
11429 i915_sw_fence_init(&state->commit_ready,
11430 intel_atomic_commit_ready);
11433 * The intel_legacy_cursor_update() fast path takes care
11434 * of avoiding the vblank waits for simple cursor
11435 * movement and flips. For cursor on/off and size changes,
11436 * we want to perform the vblank waits so that watermark
11437 * updates happen during the correct frames. Gen9+ have
11438 * double buffered watermarks and so shouldn't need this.
11440 * Unset state->legacy_cursor_update before the call to
11441 * drm_atomic_helper_setup_commit() because otherwise
11442 * drm_atomic_helper_wait_for_flip_done() is a noop and
11443 * we get FIFO underruns because we didn't wait
11446 * FIXME doing watermarks and fb cleanup from a vblank worker
11447 * (assuming we had any) would solve these problems.
11449 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
11450 struct intel_crtc_state *new_crtc_state;
11451 struct intel_crtc *crtc;
11454 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
11455 if (new_crtc_state->wm.need_postvbl_update ||
11456 new_crtc_state->update_wm_post)
11457 state->base.legacy_cursor_update = false;
11460 ret = intel_atomic_prepare_commit(state);
11462 drm_dbg_atomic(&dev_priv->drm,
11463 "Preparing state failed with %i\n", ret);
11464 i915_sw_fence_commit(&state->commit_ready);
11465 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11469 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
11471 ret = drm_atomic_helper_swap_state(&state->base, true);
11473 intel_atomic_swap_global_state(state);
11476 struct intel_crtc_state *new_crtc_state;
11477 struct intel_crtc *crtc;
11480 i915_sw_fence_commit(&state->commit_ready);
11482 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
11483 intel_dsb_cleanup(new_crtc_state);
11485 drm_atomic_helper_cleanup_planes(dev, &state->base);
11486 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11489 intel_shared_dpll_swap_state(state);
11490 intel_atomic_track_fbs(state);
11492 drm_atomic_state_get(&state->base);
11493 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
11495 i915_sw_fence_commit(&state->commit_ready);
11496 if (nonblock && state->modeset) {
11497 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
11498 } else if (nonblock) {
11499 queue_work(dev_priv->flip_wq, &state->base.commit_work);
11501 if (state->modeset)
11502 flush_workqueue(dev_priv->modeset_wq);
11503 intel_atomic_commit_tail(state);
11509 struct wait_rps_boost {
11510 struct wait_queue_entry wait;
11512 struct drm_crtc *crtc;
11513 struct i915_request *request;
11516 static int do_rps_boost(struct wait_queue_entry *_wait,
11517 unsigned mode, int sync, void *key)
11519 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
11520 struct i915_request *rq = wait->request;
11523 * If we missed the vblank, but the request is already running it
11524 * is reasonable to assume that it will complete before the next
11525 * vblank without our intervention, so leave RPS alone.
11527 if (!i915_request_started(rq))
11528 intel_rps_boost(rq);
11529 i915_request_put(rq);
11531 drm_crtc_vblank_put(wait->crtc);
11533 list_del(&wait->wait.entry);
11538 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
11539 struct dma_fence *fence)
11541 struct wait_rps_boost *wait;
11543 if (!dma_fence_is_i915(fence))
11546 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
11549 if (drm_crtc_vblank_get(crtc))
11552 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
11554 drm_crtc_vblank_put(crtc);
11558 wait->request = to_request(dma_fence_get(fence));
11561 wait->wait.func = do_rps_boost;
11562 wait->wait.flags = 0;
11564 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
11567 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
11569 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11570 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11571 struct drm_framebuffer *fb = plane_state->hw.fb;
11572 struct i915_vma *vma;
11574 if (plane->id == PLANE_CURSOR &&
11575 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
11576 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11577 const int align = intel_cursor_alignment(dev_priv);
11580 err = i915_gem_object_attach_phys(obj, align);
11585 vma = intel_pin_and_fence_fb_obj(fb,
11586 &plane_state->view,
11587 intel_plane_uses_fence(plane_state),
11588 &plane_state->flags);
11590 return PTR_ERR(vma);
11592 plane_state->vma = vma;
11597 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
11599 struct i915_vma *vma;
11601 vma = fetch_and_zero(&old_plane_state->vma);
11603 intel_unpin_fb_vma(vma, old_plane_state->flags);
11606 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
11608 struct i915_sched_attr attr = {
11609 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
11612 i915_gem_object_wait_priority(obj, 0, &attr);
11616 * intel_prepare_plane_fb - Prepare fb for usage on plane
11617 * @_plane: drm plane to prepare for
11618 * @_new_plane_state: the plane state being prepared
11620 * Prepares a framebuffer for usage on a display plane. Generally this
11621 * involves pinning the underlying object and updating the frontbuffer tracking
11622 * bits. Some older platforms need special physical address handling for
11625 * Returns 0 on success, negative error code on failure.
11628 intel_prepare_plane_fb(struct drm_plane *_plane,
11629 struct drm_plane_state *_new_plane_state)
11631 struct intel_plane *plane = to_intel_plane(_plane);
11632 struct intel_plane_state *new_plane_state =
11633 to_intel_plane_state(_new_plane_state);
11634 struct intel_atomic_state *state =
11635 to_intel_atomic_state(new_plane_state->uapi.state);
11636 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11637 const struct intel_plane_state *old_plane_state =
11638 intel_atomic_get_old_plane_state(state, plane);
11639 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
11640 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
11644 const struct intel_crtc_state *crtc_state =
11645 intel_atomic_get_new_crtc_state(state,
11646 to_intel_crtc(old_plane_state->hw.crtc));
11648 /* Big Hammer, we also need to ensure that any pending
11649 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
11650 * current scanout is retired before unpinning the old
11651 * framebuffer. Note that we rely on userspace rendering
11652 * into the buffer attached to the pipe they are waiting
11653 * on. If not, userspace generates a GPU hang with IPEHR
11654 * point to the MI_WAIT_FOR_EVENT.
11656 * This should only fail upon a hung GPU, in which case we
11657 * can safely continue.
11659 if (intel_crtc_needs_modeset(crtc_state)) {
11660 ret = i915_sw_fence_await_reservation(&state->commit_ready,
11661 old_obj->base.resv, NULL,
11669 if (new_plane_state->uapi.fence) { /* explicit fencing */
11670 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11671 new_plane_state->uapi.fence,
11672 i915_fence_timeout(dev_priv),
11681 ret = i915_gem_object_pin_pages(obj);
11685 ret = intel_plane_pin_fb(new_plane_state);
11687 i915_gem_object_unpin_pages(obj);
11691 fb_obj_bump_render_priority(obj);
11692 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11694 if (!new_plane_state->uapi.fence) { /* implicit fencing */
11695 struct dma_fence *fence;
11697 ret = i915_sw_fence_await_reservation(&state->commit_ready,
11698 obj->base.resv, NULL,
11700 i915_fence_timeout(dev_priv),
11705 fence = dma_resv_get_excl_rcu(obj->base.resv);
11707 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11709 dma_fence_put(fence);
11712 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11713 new_plane_state->uapi.fence);
11717 * We declare pageflips to be interactive and so merit a small bias
11718 * towards upclocking to deliver the frame on time. By only changing
11719 * the RPS thresholds to sample more regularly and aim for higher
11720 * clocks we can hopefully deliver low power workloads (like kodi)
11721 * that are not quite steady state without resorting to forcing
11722 * maximum clocks following a vblank miss (see do_rps_boost()).
11724 if (!state->rps_interactive) {
11725 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11726 state->rps_interactive = true;
11732 intel_plane_unpin_fb(new_plane_state);
11738 * intel_cleanup_plane_fb - Cleans up an fb after plane use
11739 * @plane: drm plane to clean up for
11740 * @_old_plane_state: the state from the previous modeset
11742 * Cleans up a framebuffer that has just been removed from a plane.
11745 intel_cleanup_plane_fb(struct drm_plane *plane,
11746 struct drm_plane_state *_old_plane_state)
11748 struct intel_plane_state *old_plane_state =
11749 to_intel_plane_state(_old_plane_state);
11750 struct intel_atomic_state *state =
11751 to_intel_atomic_state(old_plane_state->uapi.state);
11752 struct drm_i915_private *dev_priv = to_i915(plane->dev);
11753 struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11758 if (state->rps_interactive) {
11759 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11760 state->rps_interactive = false;
11763 /* Should only be called after a successful intel_prepare_plane_fb()! */
11764 intel_plane_unpin_fb(old_plane_state);
11768 * intel_plane_destroy - destroy a plane
11769 * @plane: plane to destroy
11771 * Common destruction function for all types of planes (primary, cursor,
11774 void intel_plane_destroy(struct drm_plane *plane)
11776 drm_plane_cleanup(plane);
11777 kfree(to_intel_plane(plane));
11780 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11782 struct intel_plane *plane;
11784 for_each_intel_plane(&dev_priv->drm, plane) {
11785 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11788 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11793 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11794 struct drm_file *file)
11796 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11797 struct drm_crtc *drmmode_crtc;
11798 struct intel_crtc *crtc;
11800 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11804 crtc = to_intel_crtc(drmmode_crtc);
11805 pipe_from_crtc_id->pipe = crtc->pipe;
11810 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11812 struct drm_device *dev = encoder->base.dev;
11813 struct intel_encoder *source_encoder;
11814 u32 possible_clones = 0;
11816 for_each_intel_encoder(dev, source_encoder) {
11817 if (encoders_cloneable(encoder, source_encoder))
11818 possible_clones |= drm_encoder_mask(&source_encoder->base);
11821 return possible_clones;
11824 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11826 struct drm_device *dev = encoder->base.dev;
11827 struct intel_crtc *crtc;
11828 u32 possible_crtcs = 0;
11830 for_each_intel_crtc(dev, crtc) {
11831 if (encoder->pipe_mask & BIT(crtc->pipe))
11832 possible_crtcs |= drm_crtc_mask(&crtc->base);
11835 return possible_crtcs;
11838 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11840 if (!IS_MOBILE(dev_priv))
11843 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11846 if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11852 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11854 if (INTEL_GEN(dev_priv) >= 9)
11857 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11860 if (HAS_PCH_LPT_H(dev_priv) &&
11861 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11864 /* DDI E can't be used if DDI A requires 4 lanes */
11865 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11868 if (!dev_priv->vbt.int_crt_support)
11874 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
11876 struct intel_encoder *encoder;
11877 bool dpd_is_edp = false;
11879 intel_pps_unlock_regs_wa(dev_priv);
11881 if (!HAS_DISPLAY(dev_priv))
11884 if (IS_ALDERLAKE_S(dev_priv)) {
11885 intel_ddi_init(dev_priv, PORT_A);
11886 intel_ddi_init(dev_priv, PORT_TC1);
11887 intel_ddi_init(dev_priv, PORT_TC2);
11888 intel_ddi_init(dev_priv, PORT_TC3);
11889 intel_ddi_init(dev_priv, PORT_TC4);
11890 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11891 intel_ddi_init(dev_priv, PORT_A);
11892 intel_ddi_init(dev_priv, PORT_B);
11893 intel_ddi_init(dev_priv, PORT_TC1);
11894 intel_ddi_init(dev_priv, PORT_TC2);
11895 } else if (INTEL_GEN(dev_priv) >= 12) {
11896 intel_ddi_init(dev_priv, PORT_A);
11897 intel_ddi_init(dev_priv, PORT_B);
11898 intel_ddi_init(dev_priv, PORT_TC1);
11899 intel_ddi_init(dev_priv, PORT_TC2);
11900 intel_ddi_init(dev_priv, PORT_TC3);
11901 intel_ddi_init(dev_priv, PORT_TC4);
11902 intel_ddi_init(dev_priv, PORT_TC5);
11903 intel_ddi_init(dev_priv, PORT_TC6);
11904 icl_dsi_init(dev_priv);
11905 } else if (IS_JSL_EHL(dev_priv)) {
11906 intel_ddi_init(dev_priv, PORT_A);
11907 intel_ddi_init(dev_priv, PORT_B);
11908 intel_ddi_init(dev_priv, PORT_C);
11909 intel_ddi_init(dev_priv, PORT_D);
11910 icl_dsi_init(dev_priv);
11911 } else if (IS_GEN(dev_priv, 11)) {
11912 intel_ddi_init(dev_priv, PORT_A);
11913 intel_ddi_init(dev_priv, PORT_B);
11914 intel_ddi_init(dev_priv, PORT_C);
11915 intel_ddi_init(dev_priv, PORT_D);
11916 intel_ddi_init(dev_priv, PORT_E);
11918 * On some ICL SKUs port F is not present. No strap bits for
11919 * this, so rely on VBT.
11920 * Work around broken VBTs on SKUs known to have no port F.
11922 if (IS_ICL_WITH_PORT_F(dev_priv) &&
11923 intel_bios_is_port_present(dev_priv, PORT_F))
11924 intel_ddi_init(dev_priv, PORT_F);
11926 icl_dsi_init(dev_priv);
11927 } else if (IS_GEN9_LP(dev_priv)) {
11929 * FIXME: Broxton doesn't support port detection via the
11930 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
11931 * detect the ports.
11933 intel_ddi_init(dev_priv, PORT_A);
11934 intel_ddi_init(dev_priv, PORT_B);
11935 intel_ddi_init(dev_priv, PORT_C);
11937 vlv_dsi_init(dev_priv);
11938 } else if (HAS_DDI(dev_priv)) {
11941 if (intel_ddi_crt_present(dev_priv))
11942 intel_crt_init(dev_priv);
11945 * Haswell uses DDI functions to detect digital outputs.
11946 * On SKL pre-D0 the strap isn't connected. Later SKUs may or
11947 * may not have it - it was supposed to be fixed by the same
11948 * time we stopped using straps. Assume it's there.
11950 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11951 /* WaIgnoreDDIAStrap: skl */
11952 if (found || IS_GEN9_BC(dev_priv))
11953 intel_ddi_init(dev_priv, PORT_A);
11955 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
11957 found = intel_de_read(dev_priv, SFUSE_STRAP);
11959 if (found & SFUSE_STRAP_DDIB_DETECTED)
11960 intel_ddi_init(dev_priv, PORT_B);
11961 if (found & SFUSE_STRAP_DDIC_DETECTED)
11962 intel_ddi_init(dev_priv, PORT_C);
11963 if (found & SFUSE_STRAP_DDID_DETECTED)
11964 intel_ddi_init(dev_priv, PORT_D);
11965 if (found & SFUSE_STRAP_DDIF_DETECTED)
11966 intel_ddi_init(dev_priv, PORT_F);
11968 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
11970 if (IS_GEN9_BC(dev_priv) &&
11971 intel_bios_is_port_present(dev_priv, PORT_E))
11972 intel_ddi_init(dev_priv, PORT_E);
11974 } else if (HAS_PCH_SPLIT(dev_priv)) {
11978 * intel_edp_init_connector() depends on this completing first,
11979 * to prevent the registration of both eDP and LVDS and the
11980 * incorrect sharing of the PPS.
11982 intel_lvds_init(dev_priv);
11983 intel_crt_init(dev_priv);
11985 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11987 if (ilk_has_edp_a(dev_priv))
11988 intel_dp_init(dev_priv, DP_A, PORT_A);
11990 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11991 /* PCH SDVOB multiplex with HDMIB */
11992 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11994 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11995 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11996 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
11999 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
12000 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
12002 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
12003 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
12005 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
12006 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
12008 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
12009 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
12010 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
12011 bool has_edp, has_port;
12013 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
12014 intel_crt_init(dev_priv);
12017 * The DP_DETECTED bit is the latched state of the DDC
12018 * SDA pin at boot. However since eDP doesn't require DDC
12019 * (no way to plug in a DP->HDMI dongle) the DDC pins for
12020 * eDP ports may have been muxed to an alternate function.
12021 * Thus we can't rely on the DP_DETECTED bit alone to detect
12022 * eDP ports. Consult the VBT as well as DP_DETECTED to
12023 * detect eDP ports.
12025 * Sadly the straps seem to be missing sometimes even for HDMI
12026 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
12027 * and VBT for the presence of the port. Additionally we can't
12028 * trust the port type the VBT declares as we've seen at least
12029 * HDMI ports that the VBT claim are DP or eDP.
12031 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
12032 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
12033 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
12034 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
12035 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
12036 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
12038 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
12039 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
12040 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
12041 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
12042 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
12043 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
12045 if (IS_CHERRYVIEW(dev_priv)) {
12047 * eDP not supported on port D,
12048 * so no need to worry about it
12050 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
12051 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
12052 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
12053 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
12054 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
12057 vlv_dsi_init(dev_priv);
12058 } else if (IS_PINEVIEW(dev_priv)) {
12059 intel_lvds_init(dev_priv);
12060 intel_crt_init(dev_priv);
12061 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
12062 bool found = false;
12064 if (IS_MOBILE(dev_priv))
12065 intel_lvds_init(dev_priv);
12067 intel_crt_init(dev_priv);
12069 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
12070 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
12071 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
12072 if (!found && IS_G4X(dev_priv)) {
12073 drm_dbg_kms(&dev_priv->drm,
12074 "probing HDMI on SDVOB\n");
12075 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
12078 if (!found && IS_G4X(dev_priv))
12079 intel_dp_init(dev_priv, DP_B, PORT_B);
12082 /* Before G4X SDVOC doesn't have its own detect register */
12084 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
12085 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
12086 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
12089 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
12091 if (IS_G4X(dev_priv)) {
12092 drm_dbg_kms(&dev_priv->drm,
12093 "probing HDMI on SDVOC\n");
12094 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
12096 if (IS_G4X(dev_priv))
12097 intel_dp_init(dev_priv, DP_C, PORT_C);
12100 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
12101 intel_dp_init(dev_priv, DP_D, PORT_D);
12103 if (SUPPORTS_TV(dev_priv))
12104 intel_tv_init(dev_priv);
12105 } else if (IS_GEN(dev_priv, 2)) {
12106 if (IS_I85X(dev_priv))
12107 intel_lvds_init(dev_priv);
12109 intel_crt_init(dev_priv);
12110 intel_dvo_init(dev_priv);
12113 for_each_intel_encoder(&dev_priv->drm, encoder) {
12114 encoder->base.possible_crtcs =
12115 intel_encoder_possible_crtcs(encoder);
12116 encoder->base.possible_clones =
12117 intel_encoder_possible_clones(encoder);
12120 intel_init_pch_refclk(dev_priv);
12122 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
12125 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
12127 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
12129 drm_framebuffer_cleanup(fb);
12130 intel_frontbuffer_put(intel_fb->frontbuffer);
12135 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
12136 struct drm_file *file,
12137 unsigned int *handle)
12139 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12140 struct drm_i915_private *i915 = to_i915(obj->base.dev);
12142 if (obj->userptr.mm) {
12143 drm_dbg(&i915->drm,
12144 "attempting to use a userptr for a framebuffer, denied\n");
12148 return drm_gem_handle_create(file, &obj->base, handle);
12151 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
12152 struct drm_file *file,
12153 unsigned flags, unsigned color,
12154 struct drm_clip_rect *clips,
12155 unsigned num_clips)
12157 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12159 i915_gem_object_flush_if_display(obj);
12160 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
12165 static const struct drm_framebuffer_funcs intel_fb_funcs = {
12166 .destroy = intel_user_framebuffer_destroy,
12167 .create_handle = intel_user_framebuffer_create_handle,
12168 .dirty = intel_user_framebuffer_dirty,
12171 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
12172 struct drm_i915_gem_object *obj,
12173 struct drm_mode_fb_cmd2 *mode_cmd)
12175 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
12176 struct drm_framebuffer *fb = &intel_fb->base;
12178 unsigned int tiling, stride;
12182 intel_fb->frontbuffer = intel_frontbuffer_get(obj);
12183 if (!intel_fb->frontbuffer)
12186 i915_gem_object_lock(obj, NULL);
12187 tiling = i915_gem_object_get_tiling(obj);
12188 stride = i915_gem_object_get_stride(obj);
12189 i915_gem_object_unlock(obj);
12191 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
12193 * If there's a fence, enforce that
12194 * the fb modifier and tiling mode match.
12196 if (tiling != I915_TILING_NONE &&
12197 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
12198 drm_dbg_kms(&dev_priv->drm,
12199 "tiling_mode doesn't match fb modifier\n");
12203 if (tiling == I915_TILING_X) {
12204 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
12205 } else if (tiling == I915_TILING_Y) {
12206 drm_dbg_kms(&dev_priv->drm,
12207 "No Y tiling for legacy addfb\n");
12212 if (!drm_any_plane_has_format(&dev_priv->drm,
12213 mode_cmd->pixel_format,
12214 mode_cmd->modifier[0])) {
12215 struct drm_format_name_buf format_name;
12217 drm_dbg_kms(&dev_priv->drm,
12218 "unsupported pixel format %s / modifier 0x%llx\n",
12219 drm_get_format_name(mode_cmd->pixel_format,
12221 mode_cmd->modifier[0]);
12226 * gen2/3 display engine uses the fence if present,
12227 * so the tiling mode must match the fb modifier exactly.
12229 if (INTEL_GEN(dev_priv) < 4 &&
12230 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
12231 drm_dbg_kms(&dev_priv->drm,
12232 "tiling_mode must match fb modifier exactly on gen2/3\n");
12236 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
12237 mode_cmd->modifier[0]);
12238 if (mode_cmd->pitches[0] > max_stride) {
12239 drm_dbg_kms(&dev_priv->drm,
12240 "%s pitch (%u) must be at most %d\n",
12241 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
12242 "tiled" : "linear",
12243 mode_cmd->pitches[0], max_stride);
12248 * If there's a fence, enforce that
12249 * the fb pitch and fence stride match.
12251 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
12252 drm_dbg_kms(&dev_priv->drm,
12253 "pitch (%d) must match tiling stride (%d)\n",
12254 mode_cmd->pitches[0], stride);
12258 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
12259 if (mode_cmd->offsets[0] != 0) {
12260 drm_dbg_kms(&dev_priv->drm,
12261 "plane 0 offset (0x%08x) must be 0\n",
12262 mode_cmd->offsets[0]);
12266 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
12268 for (i = 0; i < fb->format->num_planes; i++) {
12269 u32 stride_alignment;
12271 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
12272 drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
12277 stride_alignment = intel_fb_stride_alignment(fb, i);
12278 if (fb->pitches[i] & (stride_alignment - 1)) {
12279 drm_dbg_kms(&dev_priv->drm,
12280 "plane %d pitch (%d) must be at least %u byte aligned\n",
12281 i, fb->pitches[i], stride_alignment);
12285 if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
12286 int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
12288 if (fb->pitches[i] != ccs_aux_stride) {
12289 drm_dbg_kms(&dev_priv->drm,
12290 "ccs aux plane %d pitch (%d) must be %d\n",
12292 fb->pitches[i], ccs_aux_stride);
12297 fb->obj[i] = &obj->base;
12300 ret = intel_fill_fb_info(dev_priv, fb);
12304 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
12306 drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
12313 intel_frontbuffer_put(intel_fb->frontbuffer);
12317 static struct drm_framebuffer *
12318 intel_user_framebuffer_create(struct drm_device *dev,
12319 struct drm_file *filp,
12320 const struct drm_mode_fb_cmd2 *user_mode_cmd)
12322 struct drm_framebuffer *fb;
12323 struct drm_i915_gem_object *obj;
12324 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
12326 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
12328 return ERR_PTR(-ENOENT);
12330 fb = intel_framebuffer_create(obj, &mode_cmd);
12331 i915_gem_object_put(obj);
12336 static enum drm_mode_status
12337 intel_mode_valid(struct drm_device *dev,
12338 const struct drm_display_mode *mode)
12340 struct drm_i915_private *dev_priv = to_i915(dev);
12341 int hdisplay_max, htotal_max;
12342 int vdisplay_max, vtotal_max;
12345 * Can't reject DBLSCAN here because Xorg ddxen can add piles
12346 * of DBLSCAN modes to the output's mode list when they detect
12347 * the scaling mode property on the connector. And they don't
12348 * ask the kernel to validate those modes in any way until
12349 * modeset time at which point the client gets a protocol error.
12350 * So in order to not upset those clients we silently ignore the
12351 * DBLSCAN flag on such connectors. For other connectors we will
12352 * reject modes with the DBLSCAN flag in encoder->compute_config().
12353 * And we always reject DBLSCAN modes in connector->mode_valid()
12354 * as we never want such modes on the connector's mode list.
12357 if (mode->vscan > 1)
12358 return MODE_NO_VSCAN;
12360 if (mode->flags & DRM_MODE_FLAG_HSKEW)
12361 return MODE_H_ILLEGAL;
12363 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
12364 DRM_MODE_FLAG_NCSYNC |
12365 DRM_MODE_FLAG_PCSYNC))
12368 if (mode->flags & (DRM_MODE_FLAG_BCAST |
12369 DRM_MODE_FLAG_PIXMUX |
12370 DRM_MODE_FLAG_CLKDIV2))
12373 /* Transcoder timing limits */
12374 if (INTEL_GEN(dev_priv) >= 11) {
12375 hdisplay_max = 16384;
12376 vdisplay_max = 8192;
12377 htotal_max = 16384;
12379 } else if (INTEL_GEN(dev_priv) >= 9 ||
12380 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12381 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
12382 vdisplay_max = 4096;
12385 } else if (INTEL_GEN(dev_priv) >= 3) {
12386 hdisplay_max = 4096;
12387 vdisplay_max = 4096;
12391 hdisplay_max = 2048;
12392 vdisplay_max = 2048;
12397 if (mode->hdisplay > hdisplay_max ||
12398 mode->hsync_start > htotal_max ||
12399 mode->hsync_end > htotal_max ||
12400 mode->htotal > htotal_max)
12401 return MODE_H_ILLEGAL;
12403 if (mode->vdisplay > vdisplay_max ||
12404 mode->vsync_start > vtotal_max ||
12405 mode->vsync_end > vtotal_max ||
12406 mode->vtotal > vtotal_max)
12407 return MODE_V_ILLEGAL;
12409 if (INTEL_GEN(dev_priv) >= 5) {
12410 if (mode->hdisplay < 64 ||
12411 mode->htotal - mode->hdisplay < 32)
12412 return MODE_H_ILLEGAL;
12414 if (mode->vtotal - mode->vdisplay < 5)
12415 return MODE_V_ILLEGAL;
12417 if (mode->htotal - mode->hdisplay < 32)
12418 return MODE_H_ILLEGAL;
12420 if (mode->vtotal - mode->vdisplay < 3)
12421 return MODE_V_ILLEGAL;
12427 enum drm_mode_status
12428 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
12429 const struct drm_display_mode *mode,
12432 int plane_width_max, plane_height_max;
12435 * intel_mode_valid() should be
12436 * sufficient on older platforms.
12438 if (INTEL_GEN(dev_priv) < 9)
12442 * Most people will probably want a fullscreen
12443 * plane so let's not advertize modes that are
12444 * too big for that.
12446 if (INTEL_GEN(dev_priv) >= 11) {
12447 plane_width_max = 5120 << bigjoiner;
12448 plane_height_max = 4320;
12450 plane_width_max = 5120;
12451 plane_height_max = 4096;
12454 if (mode->hdisplay > plane_width_max)
12455 return MODE_H_ILLEGAL;
12457 if (mode->vdisplay > plane_height_max)
12458 return MODE_V_ILLEGAL;
12463 static const struct drm_mode_config_funcs intel_mode_funcs = {
12464 .fb_create = intel_user_framebuffer_create,
12465 .get_format_info = intel_get_format_info,
12466 .output_poll_changed = intel_fbdev_output_poll_changed,
12467 .mode_valid = intel_mode_valid,
12468 .atomic_check = intel_atomic_check,
12469 .atomic_commit = intel_atomic_commit,
12470 .atomic_state_alloc = intel_atomic_state_alloc,
12471 .atomic_state_clear = intel_atomic_state_clear,
12472 .atomic_state_free = intel_atomic_state_free,
12476 * intel_init_display_hooks - initialize the display modesetting hooks
12477 * @dev_priv: device private
12479 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
12481 intel_init_cdclk_hooks(dev_priv);
12483 intel_dpll_init_clock_hook(dev_priv);
12485 if (INTEL_GEN(dev_priv) >= 9) {
12486 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12487 dev_priv->display.crtc_enable = hsw_crtc_enable;
12488 dev_priv->display.crtc_disable = hsw_crtc_disable;
12489 } else if (HAS_DDI(dev_priv)) {
12490 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12491 dev_priv->display.crtc_enable = hsw_crtc_enable;
12492 dev_priv->display.crtc_disable = hsw_crtc_disable;
12493 } else if (HAS_PCH_SPLIT(dev_priv)) {
12494 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
12495 dev_priv->display.crtc_enable = ilk_crtc_enable;
12496 dev_priv->display.crtc_disable = ilk_crtc_disable;
12497 } else if (IS_CHERRYVIEW(dev_priv) ||
12498 IS_VALLEYVIEW(dev_priv)) {
12499 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12500 dev_priv->display.crtc_enable = valleyview_crtc_enable;
12501 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12503 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12504 dev_priv->display.crtc_enable = i9xx_crtc_enable;
12505 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12508 intel_fdi_init_hook(dev_priv);
12510 if (INTEL_GEN(dev_priv) >= 9) {
12511 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
12512 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
12514 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
12515 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
12520 void intel_modeset_init_hw(struct drm_i915_private *i915)
12522 struct intel_cdclk_state *cdclk_state =
12523 to_intel_cdclk_state(i915->cdclk.obj.state);
12525 intel_update_cdclk(i915);
12526 intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
12527 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
12530 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
12532 struct drm_plane *plane;
12533 struct intel_crtc *crtc;
12535 for_each_intel_crtc(state->dev, crtc) {
12536 struct intel_crtc_state *crtc_state;
12538 crtc_state = intel_atomic_get_crtc_state(state, crtc);
12539 if (IS_ERR(crtc_state))
12540 return PTR_ERR(crtc_state);
12542 if (crtc_state->hw.active) {
12544 * Preserve the inherited flag to avoid
12545 * taking the full modeset path.
12547 crtc_state->inherited = true;
12551 drm_for_each_plane(plane, state->dev) {
12552 struct drm_plane_state *plane_state;
12554 plane_state = drm_atomic_get_plane_state(state, plane);
12555 if (IS_ERR(plane_state))
12556 return PTR_ERR(plane_state);
12563 * Calculate what we think the watermarks should be for the state we've read
12564 * out of the hardware and then immediately program those watermarks so that
12565 * we ensure the hardware settings match our internal state.
12567 * We can calculate what we think WM's should be by creating a duplicate of the
12568 * current state (which was constructed during hardware readout) and running it
12569 * through the atomic check code to calculate new watermark values in the
12572 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
12574 struct drm_atomic_state *state;
12575 struct intel_atomic_state *intel_state;
12576 struct intel_crtc *crtc;
12577 struct intel_crtc_state *crtc_state;
12578 struct drm_modeset_acquire_ctx ctx;
12582 /* Only supported on platforms that use atomic watermark design */
12583 if (!dev_priv->display.optimize_watermarks)
12586 state = drm_atomic_state_alloc(&dev_priv->drm);
12587 if (drm_WARN_ON(&dev_priv->drm, !state))
12590 intel_state = to_intel_atomic_state(state);
12592 drm_modeset_acquire_init(&ctx, 0);
12595 state->acquire_ctx = &ctx;
12598 * Hardware readout is the only time we don't want to calculate
12599 * intermediate watermarks (since we don't trust the current
12602 if (!HAS_GMCH(dev_priv))
12603 intel_state->skip_intermediate_wm = true;
12605 ret = sanitize_watermarks_add_affected(state);
12609 ret = intel_atomic_check(&dev_priv->drm, state);
12613 /* Write calculated watermark values back */
12614 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
12615 crtc_state->wm.need_postvbl_update = true;
12616 dev_priv->display.optimize_watermarks(intel_state, crtc);
12618 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
12622 if (ret == -EDEADLK) {
12623 drm_atomic_state_clear(state);
12624 drm_modeset_backoff(&ctx);
12629 * If we fail here, it means that the hardware appears to be
12630 * programmed in a way that shouldn't be possible, given our
12631 * understanding of watermark requirements. This might mean a
12632 * mistake in the hardware readout code or a mistake in the
12633 * watermark calculations for a given platform. Raise a WARN
12634 * so that this is noticeable.
12636 * If this actually happens, we'll have to just leave the
12637 * BIOS-programmed watermarks untouched and hope for the best.
12639 drm_WARN(&dev_priv->drm, ret,
12640 "Could not determine valid watermarks for inherited state\n");
12642 drm_atomic_state_put(state);
12644 drm_modeset_drop_locks(&ctx);
12645 drm_modeset_acquire_fini(&ctx);
12648 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12650 if (IS_GEN(dev_priv, 5)) {
12652 intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12654 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12655 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
12656 dev_priv->fdi_pll_freq = 270000;
12661 drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12664 static int intel_initial_commit(struct drm_device *dev)
12666 struct drm_atomic_state *state = NULL;
12667 struct drm_modeset_acquire_ctx ctx;
12668 struct intel_crtc *crtc;
12671 state = drm_atomic_state_alloc(dev);
12675 drm_modeset_acquire_init(&ctx, 0);
12678 state->acquire_ctx = &ctx;
12680 for_each_intel_crtc(dev, crtc) {
12681 struct intel_crtc_state *crtc_state =
12682 intel_atomic_get_crtc_state(state, crtc);
12684 if (IS_ERR(crtc_state)) {
12685 ret = PTR_ERR(crtc_state);
12689 if (crtc_state->hw.active) {
12690 struct intel_encoder *encoder;
12693 * We've not yet detected sink capabilities
12694 * (audio,infoframes,etc.) and thus we don't want to
12695 * force a full state recomputation yet. We want that to
12696 * happen only for the first real commit from userspace.
12697 * So preserve the inherited flag for the time being.
12699 crtc_state->inherited = true;
12701 ret = drm_atomic_add_affected_planes(state, &crtc->base);
12706 * FIXME hack to force a LUT update to avoid the
12707 * plane update forcing the pipe gamma on without
12708 * having a proper LUT loaded. Remove once we
12709 * have readout for pipe gamma enable.
12711 crtc_state->uapi.color_mgmt_changed = true;
12713 for_each_intel_encoder_mask(dev, encoder,
12714 crtc_state->uapi.encoder_mask) {
12715 if (encoder->initial_fastset_check &&
12716 !encoder->initial_fastset_check(encoder, crtc_state)) {
12717 ret = drm_atomic_add_affected_connectors(state,
12726 ret = drm_atomic_commit(state);
12729 if (ret == -EDEADLK) {
12730 drm_atomic_state_clear(state);
12731 drm_modeset_backoff(&ctx);
12735 drm_atomic_state_put(state);
12737 drm_modeset_drop_locks(&ctx);
12738 drm_modeset_acquire_fini(&ctx);
12743 static void intel_mode_config_init(struct drm_i915_private *i915)
12745 struct drm_mode_config *mode_config = &i915->drm.mode_config;
12747 drm_mode_config_init(&i915->drm);
12748 INIT_LIST_HEAD(&i915->global_obj_list);
12750 mode_config->min_width = 0;
12751 mode_config->min_height = 0;
12753 mode_config->preferred_depth = 24;
12754 mode_config->prefer_shadow = 1;
12756 mode_config->allow_fb_modifiers = true;
12758 mode_config->funcs = &intel_mode_funcs;
12760 mode_config->async_page_flip = has_async_flips(i915);
12763 * Maximum framebuffer dimensions, chosen to match
12764 * the maximum render engine surface size on gen4+.
12766 if (INTEL_GEN(i915) >= 7) {
12767 mode_config->max_width = 16384;
12768 mode_config->max_height = 16384;
12769 } else if (INTEL_GEN(i915) >= 4) {
12770 mode_config->max_width = 8192;
12771 mode_config->max_height = 8192;
12772 } else if (IS_GEN(i915, 3)) {
12773 mode_config->max_width = 4096;
12774 mode_config->max_height = 4096;
12776 mode_config->max_width = 2048;
12777 mode_config->max_height = 2048;
12780 if (IS_I845G(i915) || IS_I865G(i915)) {
12781 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12782 mode_config->cursor_height = 1023;
12783 } else if (IS_I830(i915) || IS_I85X(i915) ||
12784 IS_I915G(i915) || IS_I915GM(i915)) {
12785 mode_config->cursor_width = 64;
12786 mode_config->cursor_height = 64;
12788 mode_config->cursor_width = 256;
12789 mode_config->cursor_height = 256;
12793 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
12795 intel_atomic_global_obj_cleanup(i915);
12796 drm_mode_config_cleanup(&i915->drm);
12799 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12801 if (plane_config->fb) {
12802 struct drm_framebuffer *fb = &plane_config->fb->base;
12804 /* We may only have the stub and not a full framebuffer */
12805 if (drm_framebuffer_read_refcount(fb))
12806 drm_framebuffer_put(fb);
12811 if (plane_config->vma)
12812 i915_vma_put(plane_config->vma);
12815 /* part #1: call before irq install */
12816 int intel_modeset_init_noirq(struct drm_i915_private *i915)
12820 if (i915_inject_probe_failure(i915))
12823 if (HAS_DISPLAY(i915)) {
12824 ret = drm_vblank_init(&i915->drm,
12825 INTEL_NUM_PIPES(i915));
12830 intel_bios_init(i915);
12832 ret = intel_vga_register(i915);
12836 /* FIXME: completely on the wrong abstraction layer */
12837 intel_power_domains_init_hw(i915, false);
12839 intel_csr_ucode_init(i915);
12841 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
12842 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
12843 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
12845 i915->framestart_delay = 1; /* 1-4 */
12847 intel_mode_config_init(i915);
12849 ret = intel_cdclk_init(i915);
12851 goto cleanup_vga_client_pw_domain_csr;
12853 ret = intel_dbuf_init(i915);
12855 goto cleanup_vga_client_pw_domain_csr;
12857 ret = intel_bw_init(i915);
12859 goto cleanup_vga_client_pw_domain_csr;
12861 init_llist_head(&i915->atomic_helper.free_list);
12862 INIT_WORK(&i915->atomic_helper.free_work,
12863 intel_atomic_helper_free_state_worker);
12865 intel_init_quirks(i915);
12867 intel_fbc_init(i915);
12871 cleanup_vga_client_pw_domain_csr:
12872 intel_csr_ucode_fini(i915);
12873 intel_power_domains_driver_remove(i915);
12874 intel_vga_unregister(i915);
12876 intel_bios_driver_remove(i915);
12881 /* part #2: call after irq install, but before gem init */
12882 int intel_modeset_init_nogem(struct drm_i915_private *i915)
12884 struct drm_device *dev = &i915->drm;
12886 struct intel_crtc *crtc;
12889 intel_init_pm(i915);
12891 intel_panel_sanitize_ssc(i915);
12893 intel_pps_setup(i915);
12895 intel_gmbus_setup(i915);
12897 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
12898 INTEL_NUM_PIPES(i915),
12899 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
12901 if (HAS_DISPLAY(i915)) {
12902 for_each_pipe(i915, pipe) {
12903 ret = intel_crtc_init(i915, pipe);
12905 intel_mode_config_cleanup(i915);
12911 intel_plane_possible_crtcs_init(i915);
12912 intel_shared_dpll_init(dev);
12913 intel_update_fdi_pll_freq(i915);
12915 intel_update_czclk(i915);
12916 intel_modeset_init_hw(i915);
12918 intel_hdcp_component_init(i915);
12920 if (i915->max_cdclk_freq == 0)
12921 intel_update_max_cdclk(i915);
12924 * If the platform has HTI, we need to find out whether it has reserved
12925 * any display resources before we create our display outputs.
12927 if (INTEL_INFO(i915)->display.has_hti)
12928 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
12930 /* Just disable it once at startup */
12931 intel_vga_disable(i915);
12932 intel_setup_outputs(i915);
12934 drm_modeset_lock_all(dev);
12935 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
12936 drm_modeset_unlock_all(dev);
12938 for_each_intel_crtc(dev, crtc) {
12939 struct intel_initial_plane_config plane_config = {};
12941 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
12945 * Note that reserving the BIOS fb up front prevents us
12946 * from stuffing other stolen allocations like the ring
12947 * on top. This prevents some ugliness at boot time, and
12948 * can even allow for smooth boot transitions if the BIOS
12949 * fb is large enough for the active pipe configuration.
12951 i915->display.get_initial_plane_config(crtc, &plane_config);
12954 * If the fb is shared between multiple heads, we'll
12955 * just get the first one.
12957 intel_find_initial_plane_obj(crtc, &plane_config);
12959 plane_config_fini(&plane_config);
12963 * Make sure hardware watermarks really match the state we read out.
12964 * Note that we need to do this after reconstructing the BIOS fb's
12965 * since the watermark calculation done here will use pstate->fb.
12967 if (!HAS_GMCH(i915))
12968 sanitize_watermarks(i915);
12973 /* part #3: call after gem init */
12974 int intel_modeset_init(struct drm_i915_private *i915)
12978 if (!HAS_DISPLAY(i915))
12982 * Force all active planes to recompute their states. So that on
12983 * mode_setcrtc after probe, all the intel_plane_state variables
12984 * are already calculated and there is no assert_plane warnings
12987 ret = intel_initial_commit(&i915->drm);
12989 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
12991 intel_overlay_setup(i915);
12993 ret = intel_fbdev_init(&i915->drm);
12997 /* Only enable hotplug handling once the fbdev is fully set up. */
12998 intel_hpd_init(i915);
12999 intel_hpd_poll_disable(i915);
13001 intel_init_ipc(i915);
13006 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
13008 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13009 /* 640x480@60Hz, ~25175 kHz */
13010 struct dpll clock = {
13020 drm_WARN_ON(&dev_priv->drm,
13021 i9xx_calc_dpll_params(48000, &clock) != 25154);
13023 drm_dbg_kms(&dev_priv->drm,
13024 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
13025 pipe_name(pipe), clock.vco, clock.dot);
13027 fp = i9xx_dpll_compute_fp(&clock);
13028 dpll = DPLL_DVO_2X_MODE |
13029 DPLL_VGA_MODE_DIS |
13030 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
13031 PLL_P2_DIVIDE_BY_4 |
13032 PLL_REF_INPUT_DREFCLK |
13035 intel_de_write(dev_priv, FP0(pipe), fp);
13036 intel_de_write(dev_priv, FP1(pipe), fp);
13038 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
13039 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
13040 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
13041 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
13042 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
13043 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
13044 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
13047 * Apparently we need to have VGA mode enabled prior to changing
13048 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
13049 * dividers, even though the register value does change.
13051 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
13052 intel_de_write(dev_priv, DPLL(pipe), dpll);
13054 /* Wait for the clocks to stabilize. */
13055 intel_de_posting_read(dev_priv, DPLL(pipe));
13058 /* The pixel multiplier can only be updated once the
13059 * DPLL is enabled and the clocks are stable.
13061 * So write it again.
13063 intel_de_write(dev_priv, DPLL(pipe), dpll);
13065 /* We do this three times for luck */
13066 for (i = 0; i < 3 ; i++) {
13067 intel_de_write(dev_priv, DPLL(pipe), dpll);
13068 intel_de_posting_read(dev_priv, DPLL(pipe));
13069 udelay(150); /* wait for warmup */
13072 intel_de_write(dev_priv, PIPECONF(pipe),
13073 PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
13074 intel_de_posting_read(dev_priv, PIPECONF(pipe));
13076 intel_wait_for_pipe_scanline_moving(crtc);
13079 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
13081 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13083 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
13086 drm_WARN_ON(&dev_priv->drm,
13087 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
13088 DISPLAY_PLANE_ENABLE);
13089 drm_WARN_ON(&dev_priv->drm,
13090 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
13091 DISPLAY_PLANE_ENABLE);
13092 drm_WARN_ON(&dev_priv->drm,
13093 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
13094 DISPLAY_PLANE_ENABLE);
13095 drm_WARN_ON(&dev_priv->drm,
13096 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
13097 drm_WARN_ON(&dev_priv->drm,
13098 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
13100 intel_de_write(dev_priv, PIPECONF(pipe), 0);
13101 intel_de_posting_read(dev_priv, PIPECONF(pipe));
13103 intel_wait_for_pipe_scanline_stopped(crtc);
13105 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
13106 intel_de_posting_read(dev_priv, DPLL(pipe));
13110 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
13112 struct intel_crtc *crtc;
13114 if (INTEL_GEN(dev_priv) >= 4)
13117 for_each_intel_crtc(&dev_priv->drm, crtc) {
13118 struct intel_plane *plane =
13119 to_intel_plane(crtc->base.primary);
13120 struct intel_crtc *plane_crtc;
13123 if (!plane->get_hw_state(plane, &pipe))
13126 if (pipe == crtc->pipe)
13129 drm_dbg_kms(&dev_priv->drm,
13130 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
13131 plane->base.base.id, plane->base.name);
13133 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13134 intel_plane_disable_noatomic(plane_crtc, plane);
13138 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
13140 struct drm_device *dev = crtc->base.dev;
13141 struct intel_encoder *encoder;
13143 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
13149 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
13151 struct drm_device *dev = encoder->base.dev;
13152 struct intel_connector *connector;
13154 for_each_connector_on_encoder(dev, &encoder->base, connector)
13160 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
13161 enum pipe pch_transcoder)
13163 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
13164 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
13167 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
13169 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13170 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13171 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
13173 if (INTEL_GEN(dev_priv) >= 9 ||
13174 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
13175 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
13178 if (transcoder_is_dsi(cpu_transcoder))
13181 val = intel_de_read(dev_priv, reg);
13182 val &= ~HSW_FRAME_START_DELAY_MASK;
13183 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
13184 intel_de_write(dev_priv, reg, val);
13186 i915_reg_t reg = PIPECONF(cpu_transcoder);
13189 val = intel_de_read(dev_priv, reg);
13190 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
13191 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
13192 intel_de_write(dev_priv, reg, val);
13195 if (!crtc_state->has_pch_encoder)
13198 if (HAS_PCH_IBX(dev_priv)) {
13199 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
13202 val = intel_de_read(dev_priv, reg);
13203 val &= ~TRANS_FRAME_START_DELAY_MASK;
13204 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
13205 intel_de_write(dev_priv, reg, val);
13207 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
13208 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
13211 val = intel_de_read(dev_priv, reg);
13212 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
13213 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
13214 intel_de_write(dev_priv, reg, val);
13218 static void intel_sanitize_crtc(struct intel_crtc *crtc,
13219 struct drm_modeset_acquire_ctx *ctx)
13221 struct drm_device *dev = crtc->base.dev;
13222 struct drm_i915_private *dev_priv = to_i915(dev);
13223 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
13225 if (crtc_state->hw.active) {
13226 struct intel_plane *plane;
13228 /* Clear any frame start delays used for debugging left by the BIOS */
13229 intel_sanitize_frame_start_delay(crtc_state);
13231 /* Disable everything but the primary plane */
13232 for_each_intel_plane_on_crtc(dev, crtc, plane) {
13233 const struct intel_plane_state *plane_state =
13234 to_intel_plane_state(plane->base.state);
13236 if (plane_state->uapi.visible &&
13237 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
13238 intel_plane_disable_noatomic(crtc, plane);
13242 * Disable any background color set by the BIOS, but enable the
13243 * gamma and CSC to match how we program our planes.
13245 if (INTEL_GEN(dev_priv) >= 9)
13246 intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
13247 SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
13250 /* Adjust the state of the output pipe according to whether we
13251 * have active connectors/encoders. */
13252 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
13253 !crtc_state->bigjoiner_slave)
13254 intel_crtc_disable_noatomic(crtc, ctx);
13256 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
13258 * We start out with underrun reporting disabled to avoid races.
13259 * For correct bookkeeping mark this on active crtcs.
13261 * Also on gmch platforms we dont have any hardware bits to
13262 * disable the underrun reporting. Which means we need to start
13263 * out with underrun reporting disabled also on inactive pipes,
13264 * since otherwise we'll complain about the garbage we read when
13265 * e.g. coming up after runtime pm.
13267 * No protection against concurrent access is required - at
13268 * worst a fifo underrun happens which also sets this to false.
13270 crtc->cpu_fifo_underrun_disabled = true;
13272 * We track the PCH trancoder underrun reporting state
13273 * within the crtc. With crtc for pipe A housing the underrun
13274 * reporting state for PCH transcoder A, crtc for pipe B housing
13275 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
13276 * and marking underrun reporting as disabled for the non-existing
13277 * PCH transcoders B and C would prevent enabling the south
13278 * error interrupt (see cpt_can_enable_serr_int()).
13280 if (has_pch_trancoder(dev_priv, crtc->pipe))
13281 crtc->pch_fifo_underrun_disabled = true;
13285 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
13287 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
13290 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
13291 * the hardware when a high res displays plugged in. DPLL P
13292 * divider is zero, and the pipe timings are bonkers. We'll
13293 * try to disable everything in that case.
13295 * FIXME would be nice to be able to sanitize this state
13296 * without several WARNs, but for now let's take the easy
13299 return IS_GEN(dev_priv, 6) &&
13300 crtc_state->hw.active &&
13301 crtc_state->shared_dpll &&
13302 crtc_state->port_clock == 0;
13305 static void intel_sanitize_encoder(struct intel_encoder *encoder)
13307 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
13308 struct intel_connector *connector;
13309 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
13310 struct intel_crtc_state *crtc_state = crtc ?
13311 to_intel_crtc_state(crtc->base.state) : NULL;
13313 /* We need to check both for a crtc link (meaning that the
13314 * encoder is active and trying to read from a pipe) and the
13315 * pipe itself being active. */
13316 bool has_active_crtc = crtc_state &&
13317 crtc_state->hw.active;
13319 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
13320 drm_dbg_kms(&dev_priv->drm,
13321 "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
13322 pipe_name(crtc->pipe));
13323 has_active_crtc = false;
13326 connector = intel_encoder_find_connector(encoder);
13327 if (connector && !has_active_crtc) {
13328 drm_dbg_kms(&dev_priv->drm,
13329 "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
13330 encoder->base.base.id,
13331 encoder->base.name);
13333 /* Connector is active, but has no active pipe. This is
13334 * fallout from our resume register restoring. Disable
13335 * the encoder manually again. */
13337 struct drm_encoder *best_encoder;
13339 drm_dbg_kms(&dev_priv->drm,
13340 "[ENCODER:%d:%s] manually disabled\n",
13341 encoder->base.base.id,
13342 encoder->base.name);
13344 /* avoid oopsing in case the hooks consult best_encoder */
13345 best_encoder = connector->base.state->best_encoder;
13346 connector->base.state->best_encoder = &encoder->base;
13348 /* FIXME NULL atomic state passed! */
13349 if (encoder->disable)
13350 encoder->disable(NULL, encoder, crtc_state,
13351 connector->base.state);
13352 if (encoder->post_disable)
13353 encoder->post_disable(NULL, encoder, crtc_state,
13354 connector->base.state);
13356 connector->base.state->best_encoder = best_encoder;
13358 encoder->base.crtc = NULL;
13360 /* Inconsistent output/port/pipe state happens presumably due to
13361 * a bug in one of the get_hw_state functions. Or someplace else
13362 * in our code, like the register restore mess on resume. Clamp
13363 * things to off as a safer default. */
13365 connector->base.dpms = DRM_MODE_DPMS_OFF;
13366 connector->base.encoder = NULL;
13369 /* notify opregion of the sanitized encoder state */
13370 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
13372 if (INTEL_GEN(dev_priv) >= 11)
13373 icl_sanitize_encoder_pll_mapping(encoder);
13376 /* FIXME read out full plane state for all planes */
13377 static void readout_plane_state(struct drm_i915_private *dev_priv)
13379 struct intel_plane *plane;
13380 struct intel_crtc *crtc;
13382 for_each_intel_plane(&dev_priv->drm, plane) {
13383 struct intel_plane_state *plane_state =
13384 to_intel_plane_state(plane->base.state);
13385 struct intel_crtc_state *crtc_state;
13386 enum pipe pipe = PIPE_A;
13389 visible = plane->get_hw_state(plane, &pipe);
13391 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13392 crtc_state = to_intel_crtc_state(crtc->base.state);
13394 intel_set_plane_visible(crtc_state, plane_state, visible);
13396 drm_dbg_kms(&dev_priv->drm,
13397 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
13398 plane->base.base.id, plane->base.name,
13399 enableddisabled(visible), pipe_name(pipe));
13402 for_each_intel_crtc(&dev_priv->drm, crtc) {
13403 struct intel_crtc_state *crtc_state =
13404 to_intel_crtc_state(crtc->base.state);
13406 fixup_plane_bitmasks(crtc_state);
13410 static void intel_modeset_readout_hw_state(struct drm_device *dev)
13412 struct drm_i915_private *dev_priv = to_i915(dev);
13413 struct intel_cdclk_state *cdclk_state =
13414 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
13415 struct intel_dbuf_state *dbuf_state =
13416 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
13418 struct intel_crtc *crtc;
13419 struct intel_encoder *encoder;
13420 struct intel_connector *connector;
13421 struct drm_connector_list_iter conn_iter;
13422 u8 active_pipes = 0;
13424 for_each_intel_crtc(dev, crtc) {
13425 struct intel_crtc_state *crtc_state =
13426 to_intel_crtc_state(crtc->base.state);
13428 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
13429 intel_crtc_free_hw_state(crtc_state);
13430 intel_crtc_state_reset(crtc_state, crtc);
13432 intel_crtc_get_pipe_config(crtc_state);
13434 crtc_state->hw.enable = crtc_state->hw.active;
13436 crtc->base.enabled = crtc_state->hw.enable;
13437 crtc->active = crtc_state->hw.active;
13439 if (crtc_state->hw.active)
13440 active_pipes |= BIT(crtc->pipe);
13442 drm_dbg_kms(&dev_priv->drm,
13443 "[CRTC:%d:%s] hw state readout: %s\n",
13444 crtc->base.base.id, crtc->base.name,
13445 enableddisabled(crtc_state->hw.active));
13448 dev_priv->active_pipes = cdclk_state->active_pipes =
13449 dbuf_state->active_pipes = active_pipes;
13451 readout_plane_state(dev_priv);
13453 intel_dpll_readout_hw_state(dev_priv);
13455 for_each_intel_encoder(dev, encoder) {
13458 if (encoder->get_hw_state(encoder, &pipe)) {
13459 struct intel_crtc_state *crtc_state;
13461 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13462 crtc_state = to_intel_crtc_state(crtc->base.state);
13464 encoder->base.crtc = &crtc->base;
13465 intel_encoder_get_config(encoder, crtc_state);
13466 if (encoder->sync_state)
13467 encoder->sync_state(encoder, crtc_state);
13469 /* read out to slave crtc as well for bigjoiner */
13470 if (crtc_state->bigjoiner) {
13471 /* encoder should read be linked to bigjoiner master */
13472 WARN_ON(crtc_state->bigjoiner_slave);
13474 crtc = crtc_state->bigjoiner_linked_crtc;
13475 crtc_state = to_intel_crtc_state(crtc->base.state);
13476 intel_encoder_get_config(encoder, crtc_state);
13479 encoder->base.crtc = NULL;
13482 drm_dbg_kms(&dev_priv->drm,
13483 "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
13484 encoder->base.base.id, encoder->base.name,
13485 enableddisabled(encoder->base.crtc),
13489 drm_connector_list_iter_begin(dev, &conn_iter);
13490 for_each_intel_connector_iter(connector, &conn_iter) {
13491 if (connector->get_hw_state(connector)) {
13492 struct intel_crtc_state *crtc_state;
13493 struct intel_crtc *crtc;
13495 connector->base.dpms = DRM_MODE_DPMS_ON;
13497 encoder = intel_attached_encoder(connector);
13498 connector->base.encoder = &encoder->base;
13500 crtc = to_intel_crtc(encoder->base.crtc);
13501 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
13503 if (crtc_state && crtc_state->hw.active) {
13505 * This has to be done during hardware readout
13506 * because anything calling .crtc_disable may
13507 * rely on the connector_mask being accurate.
13509 crtc_state->uapi.connector_mask |=
13510 drm_connector_mask(&connector->base);
13511 crtc_state->uapi.encoder_mask |=
13512 drm_encoder_mask(&encoder->base);
13515 connector->base.dpms = DRM_MODE_DPMS_OFF;
13516 connector->base.encoder = NULL;
13518 drm_dbg_kms(&dev_priv->drm,
13519 "[CONNECTOR:%d:%s] hw state readout: %s\n",
13520 connector->base.base.id, connector->base.name,
13521 enableddisabled(connector->base.encoder));
13523 drm_connector_list_iter_end(&conn_iter);
13525 for_each_intel_crtc(dev, crtc) {
13526 struct intel_bw_state *bw_state =
13527 to_intel_bw_state(dev_priv->bw_obj.state);
13528 struct intel_crtc_state *crtc_state =
13529 to_intel_crtc_state(crtc->base.state);
13530 struct intel_plane *plane;
13533 if (crtc_state->bigjoiner_slave)
13536 if (crtc_state->hw.active) {
13538 * The initial mode needs to be set in order to keep
13539 * the atomic core happy. It wants a valid mode if the
13540 * crtc's enabled, so we do the above call.
13542 * But we don't set all the derived state fully, hence
13543 * set a flag to indicate that a full recalculation is
13544 * needed on the next commit.
13546 crtc_state->inherited = true;
13548 intel_crtc_update_active_timings(crtc_state);
13550 intel_crtc_copy_hw_to_uapi_state(crtc_state);
13553 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13554 const struct intel_plane_state *plane_state =
13555 to_intel_plane_state(plane->base.state);
13558 * FIXME don't have the fb yet, so can't
13559 * use intel_plane_data_rate() :(
13561 if (plane_state->uapi.visible)
13562 crtc_state->data_rate[plane->id] =
13563 4 * crtc_state->pixel_rate;
13565 * FIXME don't have the fb yet, so can't
13566 * use plane->min_cdclk() :(
13568 if (plane_state->uapi.visible && plane->min_cdclk) {
13569 if (crtc_state->double_wide ||
13570 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
13571 crtc_state->min_cdclk[plane->id] =
13572 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
13574 crtc_state->min_cdclk[plane->id] =
13575 crtc_state->pixel_rate;
13577 drm_dbg_kms(&dev_priv->drm,
13578 "[PLANE:%d:%s] min_cdclk %d kHz\n",
13579 plane->base.base.id, plane->base.name,
13580 crtc_state->min_cdclk[plane->id]);
13583 if (crtc_state->hw.active) {
13584 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
13585 if (drm_WARN_ON(dev, min_cdclk < 0))
13589 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13590 cdclk_state->min_voltage_level[crtc->pipe] =
13591 crtc_state->min_voltage_level;
13593 intel_bw_crtc_update(bw_state, crtc_state);
13595 intel_pipe_config_sanity_check(dev_priv, crtc_state);
13597 /* discard our incomplete slave state, copy it from master */
13598 if (crtc_state->bigjoiner && crtc_state->hw.active) {
13599 struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13600 struct intel_crtc_state *slave_crtc_state =
13601 to_intel_crtc_state(slave->base.state);
13603 copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13604 slave->base.mode = crtc->base.mode;
13606 cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13607 cdclk_state->min_voltage_level[slave->pipe] =
13608 crtc_state->min_voltage_level;
13610 for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13611 const struct intel_plane_state *plane_state =
13612 to_intel_plane_state(plane->base.state);
13615 * FIXME don't have the fb yet, so can't
13616 * use intel_plane_data_rate() :(
13618 if (plane_state->uapi.visible)
13619 crtc_state->data_rate[plane->id] =
13620 4 * crtc_state->pixel_rate;
13622 crtc_state->data_rate[plane->id] = 0;
13625 intel_bw_crtc_update(bw_state, slave_crtc_state);
13626 drm_calc_timestamping_constants(&slave->base,
13627 &slave_crtc_state->hw.adjusted_mode);
13633 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13635 struct intel_encoder *encoder;
13637 for_each_intel_encoder(&dev_priv->drm, encoder) {
13638 struct intel_crtc_state *crtc_state;
13640 if (!encoder->get_power_domains)
13644 * MST-primary and inactive encoders don't have a crtc state
13645 * and neither of these require any power domain references.
13647 if (!encoder->base.crtc)
13650 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13651 encoder->get_power_domains(encoder, crtc_state);
13655 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13658 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
13659 * Also known as Wa_14010480278.
13661 if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
13662 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13663 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
13665 if (IS_HASWELL(dev_priv)) {
13667 * WaRsPkgCStateDisplayPMReq:hsw
13668 * System hang if this isn't done before disabling all planes!
13670 intel_de_write(dev_priv, CHICKEN_PAR1_1,
13671 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13674 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13675 /* Display WA #1142:kbl,cfl,cml */
13676 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13677 KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13678 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13679 KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13680 KBL_ARB_FILL_SPARE_14);
13684 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13685 enum port port, i915_reg_t hdmi_reg)
13687 u32 val = intel_de_read(dev_priv, hdmi_reg);
13689 if (val & SDVO_ENABLE ||
13690 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13693 drm_dbg_kms(&dev_priv->drm,
13694 "Sanitizing transcoder select for HDMI %c\n",
13697 val &= ~SDVO_PIPE_SEL_MASK;
13698 val |= SDVO_PIPE_SEL(PIPE_A);
13700 intel_de_write(dev_priv, hdmi_reg, val);
13703 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13704 enum port port, i915_reg_t dp_reg)
13706 u32 val = intel_de_read(dev_priv, dp_reg);
13708 if (val & DP_PORT_EN ||
13709 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13712 drm_dbg_kms(&dev_priv->drm,
13713 "Sanitizing transcoder select for DP %c\n",
13716 val &= ~DP_PIPE_SEL_MASK;
13717 val |= DP_PIPE_SEL(PIPE_A);
13719 intel_de_write(dev_priv, dp_reg, val);
13722 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13725 * The BIOS may select transcoder B on some of the PCH
13726 * ports even it doesn't enable the port. This would trip
13727 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13728 * Sanitize the transcoder select bits to prevent that. We
13729 * assume that the BIOS never actually enabled the port,
13730 * because if it did we'd actually have to toggle the port
13731 * on and back off to make the transcoder A select stick
13732 * (see. intel_dp_link_down(), intel_disable_hdmi(),
13733 * intel_disable_sdvo()).
13735 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13736 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13737 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13739 /* PCH SDVOB multiplex with HDMIB */
13740 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13741 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13742 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13745 /* Scan out the current hw modeset state,
13746 * and sanitizes it to the current state
13749 intel_modeset_setup_hw_state(struct drm_device *dev,
13750 struct drm_modeset_acquire_ctx *ctx)
13752 struct drm_i915_private *dev_priv = to_i915(dev);
13753 struct intel_encoder *encoder;
13754 struct intel_crtc *crtc;
13755 intel_wakeref_t wakeref;
13757 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
13759 intel_early_display_was(dev_priv);
13760 intel_modeset_readout_hw_state(dev);
13762 /* HW state is read out, now we need to sanitize this mess. */
13764 /* Sanitize the TypeC port mode upfront, encoders depend on this */
13765 for_each_intel_encoder(dev, encoder) {
13766 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
13768 /* We need to sanitize only the MST primary port. */
13769 if (encoder->type != INTEL_OUTPUT_DP_MST &&
13770 intel_phy_is_tc(dev_priv, phy))
13771 intel_tc_port_sanitize(enc_to_dig_port(encoder));
13774 get_encoder_power_domains(dev_priv);
13776 if (HAS_PCH_IBX(dev_priv))
13777 ibx_sanitize_pch_ports(dev_priv);
13780 * intel_sanitize_plane_mapping() may need to do vblank
13781 * waits, so we need vblank interrupts restored beforehand.
13783 for_each_intel_crtc(&dev_priv->drm, crtc) {
13784 struct intel_crtc_state *crtc_state =
13785 to_intel_crtc_state(crtc->base.state);
13787 drm_crtc_vblank_reset(&crtc->base);
13789 if (crtc_state->hw.active)
13790 intel_crtc_vblank_on(crtc_state);
13793 intel_sanitize_plane_mapping(dev_priv);
13795 for_each_intel_encoder(dev, encoder)
13796 intel_sanitize_encoder(encoder);
13798 for_each_intel_crtc(&dev_priv->drm, crtc) {
13799 struct intel_crtc_state *crtc_state =
13800 to_intel_crtc_state(crtc->base.state);
13802 intel_sanitize_crtc(crtc, ctx);
13803 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
13806 intel_modeset_update_connector_atomic_state(dev);
13808 intel_dpll_sanitize_state(dev_priv);
13810 if (IS_G4X(dev_priv)) {
13811 g4x_wm_get_hw_state(dev_priv);
13812 g4x_wm_sanitize(dev_priv);
13813 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
13814 vlv_wm_get_hw_state(dev_priv);
13815 vlv_wm_sanitize(dev_priv);
13816 } else if (INTEL_GEN(dev_priv) >= 9) {
13817 skl_wm_get_hw_state(dev_priv);
13818 } else if (HAS_PCH_SPLIT(dev_priv)) {
13819 ilk_wm_get_hw_state(dev_priv);
13822 for_each_intel_crtc(dev, crtc) {
13823 struct intel_crtc_state *crtc_state =
13824 to_intel_crtc_state(crtc->base.state);
13827 put_domains = modeset_get_crtc_power_domains(crtc_state);
13828 if (drm_WARN_ON(dev, put_domains))
13829 modeset_put_crtc_power_domains(crtc, put_domains);
13832 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
13835 void intel_display_resume(struct drm_device *dev)
13837 struct drm_i915_private *dev_priv = to_i915(dev);
13838 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13839 struct drm_modeset_acquire_ctx ctx;
13842 dev_priv->modeset_restore_state = NULL;
13844 state->acquire_ctx = &ctx;
13846 drm_modeset_acquire_init(&ctx, 0);
13849 ret = drm_modeset_lock_all_ctx(dev, &ctx);
13850 if (ret != -EDEADLK)
13853 drm_modeset_backoff(&ctx);
13857 ret = __intel_display_resume(dev, state, &ctx);
13859 intel_enable_ipc(dev_priv);
13860 drm_modeset_drop_locks(&ctx);
13861 drm_modeset_acquire_fini(&ctx);
13864 drm_err(&dev_priv->drm,
13865 "Restoring old state failed with %i\n", ret);
13867 drm_atomic_state_put(state);
13870 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13872 struct intel_connector *connector;
13873 struct drm_connector_list_iter conn_iter;
13875 /* Kill all the work that may have been queued by hpd. */
13876 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13877 for_each_intel_connector_iter(connector, &conn_iter) {
13878 if (connector->modeset_retry_work.func)
13879 cancel_work_sync(&connector->modeset_retry_work);
13880 if (connector->hdcp.shim) {
13881 cancel_delayed_work_sync(&connector->hdcp.check_work);
13882 cancel_work_sync(&connector->hdcp.prop_work);
13885 drm_connector_list_iter_end(&conn_iter);
13888 /* part #1: call before irq uninstall */
13889 void intel_modeset_driver_remove(struct drm_i915_private *i915)
13891 flush_workqueue(i915->flip_wq);
13892 flush_workqueue(i915->modeset_wq);
13894 flush_work(&i915->atomic_helper.free_work);
13895 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
13898 /* part #2: call after irq uninstall */
13899 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
13902 * Due to the hpd irq storm handling the hotplug work can re-arm the
13903 * poll handlers. Hence disable polling after hpd handling is shut down.
13905 intel_hpd_poll_fini(i915);
13908 * MST topology needs to be suspended so we don't have any calls to
13909 * fbdev after it's finalized. MST will be destroyed later as part of
13910 * drm_mode_config_cleanup()
13912 intel_dp_mst_suspend(i915);
13914 /* poll work can call into fbdev, hence clean that up afterwards */
13915 intel_fbdev_fini(i915);
13917 intel_unregister_dsm_handler();
13919 intel_fbc_global_disable(i915);
13921 /* flush any delayed tasks or pending work */
13922 flush_scheduled_work();
13924 intel_hdcp_component_fini(i915);
13926 intel_mode_config_cleanup(i915);
13928 intel_overlay_cleanup(i915);
13930 intel_gmbus_teardown(i915);
13932 destroy_workqueue(i915->flip_wq);
13933 destroy_workqueue(i915->modeset_wq);
13935 intel_fbc_cleanup_cfb(i915);
13938 /* part #3: call after gem init */
13939 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
13941 intel_csr_ucode_fini(i915);
13943 intel_power_domains_driver_remove(i915);
13945 intel_vga_unregister(i915);
13947 intel_bios_driver_remove(i915);
13950 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
13952 struct intel_display_error_state {
13954 u32 power_well_driver;
13956 struct intel_cursor_error_state {
13961 } cursor[I915_MAX_PIPES];
13963 struct intel_pipe_error_state {
13964 bool power_domain_on;
13967 } pipe[I915_MAX_PIPES];
13969 struct intel_plane_error_state {
13977 } plane[I915_MAX_PIPES];
13979 struct intel_transcoder_error_state {
13981 bool power_domain_on;
13982 enum transcoder cpu_transcoder;
13995 struct intel_display_error_state *
13996 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
13998 struct intel_display_error_state *error;
13999 int transcoders[] = {
14008 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
14010 if (!HAS_DISPLAY(dev_priv))
14013 error = kzalloc(sizeof(*error), GFP_ATOMIC);
14017 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
14018 error->power_well_driver = intel_de_read(dev_priv,
14019 HSW_PWR_WELL_CTL2);
14021 for_each_pipe(dev_priv, i) {
14022 error->pipe[i].power_domain_on =
14023 __intel_display_power_is_enabled(dev_priv,
14024 POWER_DOMAIN_PIPE(i));
14025 if (!error->pipe[i].power_domain_on)
14028 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
14029 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
14030 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
14032 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
14033 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
14034 if (INTEL_GEN(dev_priv) <= 3) {
14035 error->plane[i].size = intel_de_read(dev_priv,
14037 error->plane[i].pos = intel_de_read(dev_priv,
14040 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
14041 error->plane[i].addr = intel_de_read(dev_priv,
14043 if (INTEL_GEN(dev_priv) >= 4) {
14044 error->plane[i].surface = intel_de_read(dev_priv,
14046 error->plane[i].tile_offset = intel_de_read(dev_priv,
14050 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
14052 if (HAS_GMCH(dev_priv))
14053 error->pipe[i].stat = intel_de_read(dev_priv,
14057 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
14058 enum transcoder cpu_transcoder = transcoders[i];
14060 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
14063 error->transcoder[i].available = true;
14064 error->transcoder[i].power_domain_on =
14065 __intel_display_power_is_enabled(dev_priv,
14066 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
14067 if (!error->transcoder[i].power_domain_on)
14070 error->transcoder[i].cpu_transcoder = cpu_transcoder;
14072 error->transcoder[i].conf = intel_de_read(dev_priv,
14073 PIPECONF(cpu_transcoder));
14074 error->transcoder[i].htotal = intel_de_read(dev_priv,
14075 HTOTAL(cpu_transcoder));
14076 error->transcoder[i].hblank = intel_de_read(dev_priv,
14077 HBLANK(cpu_transcoder));
14078 error->transcoder[i].hsync = intel_de_read(dev_priv,
14079 HSYNC(cpu_transcoder));
14080 error->transcoder[i].vtotal = intel_de_read(dev_priv,
14081 VTOTAL(cpu_transcoder));
14082 error->transcoder[i].vblank = intel_de_read(dev_priv,
14083 VBLANK(cpu_transcoder));
14084 error->transcoder[i].vsync = intel_de_read(dev_priv,
14085 VSYNC(cpu_transcoder));
14091 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
14094 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
14095 struct intel_display_error_state *error)
14097 struct drm_i915_private *dev_priv = m->i915;
14103 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
14104 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
14105 err_printf(m, "PWR_WELL_CTL2: %08x\n",
14106 error->power_well_driver);
14107 for_each_pipe(dev_priv, i) {
14108 err_printf(m, "Pipe [%d]:\n", i);
14109 err_printf(m, " Power: %s\n",
14110 onoff(error->pipe[i].power_domain_on));
14111 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
14112 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
14114 err_printf(m, "Plane [%d]:\n", i);
14115 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
14116 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
14117 if (INTEL_GEN(dev_priv) <= 3) {
14118 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
14119 err_printf(m, " POS: %08x\n", error->plane[i].pos);
14121 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
14122 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
14123 if (INTEL_GEN(dev_priv) >= 4) {
14124 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
14125 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
14128 err_printf(m, "Cursor [%d]:\n", i);
14129 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
14130 err_printf(m, " POS: %08x\n", error->cursor[i].position);
14131 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
14134 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
14135 if (!error->transcoder[i].available)
14138 err_printf(m, "CPU transcoder: %s\n",
14139 transcoder_name(error->transcoder[i].cpu_transcoder));
14140 err_printf(m, " Power: %s\n",
14141 onoff(error->transcoder[i].power_domain_on));
14142 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
14143 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
14144 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
14145 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
14146 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
14147 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
14148 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);