2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_gmbus.h"
58 #include "display/intel_hdmi.h"
59 #include "display/intel_lvds.h"
60 #include "display/intel_sdvo.h"
61 #include "display/intel_tv.h"
62 #include "display/intel_vdsc.h"
63 #include "display/intel_vrr.h"
65 #include "gem/i915_gem_object.h"
67 #include "gt/intel_rps.h"
72 #include "intel_acpi.h"
73 #include "intel_atomic.h"
74 #include "intel_atomic_plane.h"
76 #include "intel_cdclk.h"
77 #include "intel_color.h"
78 #include "intel_crtc.h"
79 #include "intel_csr.h"
80 #include "intel_display_types.h"
81 #include "intel_dp_link_training.h"
82 #include "intel_fbc.h"
83 #include "intel_fdi.h"
84 #include "intel_fbdev.h"
85 #include "intel_fifo_underrun.h"
86 #include "intel_frontbuffer.h"
87 #include "intel_hdcp.h"
88 #include "intel_hotplug.h"
89 #include "intel_overlay.h"
90 #include "intel_pipe_crc.h"
92 #include "intel_pps.h"
93 #include "intel_psr.h"
94 #include "intel_quirks.h"
95 #include "intel_sideband.h"
96 #include "intel_sprite.h"
98 #include "intel_vga.h"
99 #include "i9xx_plane.h"
100 #include "skl_scaler.h"
101 #include "skl_universal_plane.h"
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
124 /* returns HPLL frequency in kHz */
125 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
127 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
129 /* Obtain SKU information */
130 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
131 CCK_FUSE_HPLL_FREQ_MASK;
133 return vco_freq[hpll_freq] * 1000;
136 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
137 const char *name, u32 reg, int ref_freq)
142 val = vlv_cck_read(dev_priv, reg);
143 divider = val & CCK_FREQUENCY_VALUES;
145 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
146 (divider << CCK_FREQUENCY_STATUS_SHIFT),
147 "%s change in progress\n", name);
149 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
152 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
153 const char *name, u32 reg)
157 vlv_cck_get(dev_priv);
159 if (dev_priv->hpll_freq == 0)
160 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
162 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
164 vlv_cck_put(dev_priv);
169 static void intel_update_czclk(struct drm_i915_private *dev_priv)
171 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
174 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
175 CCK_CZ_CLOCK_CONTROL);
177 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
178 dev_priv->czclk_freq);
181 /* WA Display #0827: Gen9:all */
183 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
186 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
187 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
189 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
190 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
193 /* Wa_2006604312:icl,ehl */
195 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
199 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
200 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
202 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
203 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
207 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
209 return crtc_state->master_transcoder != INVALID_TRANSCODER;
213 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
215 return crtc_state->sync_mode_slaves_mask != 0;
219 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
221 return is_trans_port_sync_master(crtc_state) ||
222 is_trans_port_sync_slave(crtc_state);
225 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
228 i915_reg_t reg = PIPEDSL(pipe);
232 if (IS_GEN(dev_priv, 2))
233 line_mask = DSL_LINEMASK_GEN2;
235 line_mask = DSL_LINEMASK_GEN3;
237 line1 = intel_de_read(dev_priv, reg) & line_mask;
239 line2 = intel_de_read(dev_priv, reg) & line_mask;
241 return line1 != line2;
244 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
246 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
247 enum pipe pipe = crtc->pipe;
249 /* Wait for the display line to settle/start moving */
250 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
251 drm_err(&dev_priv->drm,
252 "pipe %c scanline %s wait timed out\n",
253 pipe_name(pipe), onoff(state));
256 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
258 wait_for_pipe_scanline_moving(crtc, false);
261 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
263 wait_for_pipe_scanline_moving(crtc, true);
267 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
269 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
270 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
272 if (INTEL_GEN(dev_priv) >= 4) {
273 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
274 i915_reg_t reg = PIPECONF(cpu_transcoder);
276 /* Wait for the Pipe State to go off */
277 if (intel_de_wait_for_clear(dev_priv, reg,
278 I965_PIPECONF_ACTIVE, 100))
279 drm_WARN(&dev_priv->drm, 1,
280 "pipe_off wait timed out\n");
282 intel_wait_for_pipe_scanline_stopped(crtc);
286 /* Only for pre-ILK configs */
287 void assert_pll(struct drm_i915_private *dev_priv,
288 enum pipe pipe, bool state)
293 val = intel_de_read(dev_priv, DPLL(pipe));
294 cur_state = !!(val & DPLL_VCO_ENABLE);
295 I915_STATE_WARN(cur_state != state,
296 "PLL state assertion failure (expected %s, current %s)\n",
297 onoff(state), onoff(cur_state));
300 /* XXX: the dsi pll is shared between MIPI DSI ports */
301 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
306 vlv_cck_get(dev_priv);
307 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
308 vlv_cck_put(dev_priv);
310 cur_state = val & DSI_PLL_VCO_EN;
311 I915_STATE_WARN(cur_state != state,
312 "DSI PLL state assertion failure (expected %s, current %s)\n",
313 onoff(state), onoff(cur_state));
316 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
317 enum pipe pipe, bool state)
321 if (HAS_DDI(dev_priv)) {
323 * DDI does not have a specific FDI_TX register.
325 * FDI is never fed from EDP transcoder
326 * so pipe->transcoder cast is fine here.
328 enum transcoder cpu_transcoder = (enum transcoder)pipe;
329 u32 val = intel_de_read(dev_priv,
330 TRANS_DDI_FUNC_CTL(cpu_transcoder));
331 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
333 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
334 cur_state = !!(val & FDI_TX_ENABLE);
336 I915_STATE_WARN(cur_state != state,
337 "FDI TX state assertion failure (expected %s, current %s)\n",
338 onoff(state), onoff(cur_state));
340 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
341 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
343 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
344 enum pipe pipe, bool state)
349 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
350 cur_state = !!(val & FDI_RX_ENABLE);
351 I915_STATE_WARN(cur_state != state,
352 "FDI RX state assertion failure (expected %s, current %s)\n",
353 onoff(state), onoff(cur_state));
355 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
356 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
358 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
363 /* ILK FDI PLL is always enabled */
364 if (IS_IRONLAKE(dev_priv))
367 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
368 if (HAS_DDI(dev_priv))
371 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
372 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
375 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
376 enum pipe pipe, bool state)
381 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
382 cur_state = !!(val & FDI_RX_PLL_ENABLE);
383 I915_STATE_WARN(cur_state != state,
384 "FDI RX PLL assertion failure (expected %s, current %s)\n",
385 onoff(state), onoff(cur_state));
388 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
392 enum pipe panel_pipe = INVALID_PIPE;
395 if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
398 if (HAS_PCH_SPLIT(dev_priv)) {
401 pp_reg = PP_CONTROL(0);
402 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
405 case PANEL_PORT_SELECT_LVDS:
406 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
408 case PANEL_PORT_SELECT_DPA:
409 g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
411 case PANEL_PORT_SELECT_DPC:
412 g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
414 case PANEL_PORT_SELECT_DPD:
415 g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
418 MISSING_CASE(port_sel);
421 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
422 /* presumably write lock depends on pipe, not port select */
423 pp_reg = PP_CONTROL(pipe);
428 pp_reg = PP_CONTROL(0);
429 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
431 drm_WARN_ON(&dev_priv->drm,
432 port_sel != PANEL_PORT_SELECT_LVDS);
433 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
436 val = intel_de_read(dev_priv, pp_reg);
437 if (!(val & PANEL_POWER_ON) ||
438 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
441 I915_STATE_WARN(panel_pipe == pipe && locked,
442 "panel assertion failure, pipe %c regs locked\n",
446 void assert_pipe(struct drm_i915_private *dev_priv,
447 enum transcoder cpu_transcoder, bool state)
450 enum intel_display_power_domain power_domain;
451 intel_wakeref_t wakeref;
453 /* we keep both pipes enabled on 830 */
454 if (IS_I830(dev_priv))
457 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
458 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
460 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
461 cur_state = !!(val & PIPECONF_ENABLE);
463 intel_display_power_put(dev_priv, power_domain, wakeref);
468 I915_STATE_WARN(cur_state != state,
469 "transcoder %s assertion failure (expected %s, current %s)\n",
470 transcoder_name(cpu_transcoder),
471 onoff(state), onoff(cur_state));
474 static void assert_plane(struct intel_plane *plane, bool state)
479 cur_state = plane->get_hw_state(plane, &pipe);
481 I915_STATE_WARN(cur_state != state,
482 "%s assertion failure (expected %s, current %s)\n",
483 plane->base.name, onoff(state), onoff(cur_state));
486 #define assert_plane_enabled(p) assert_plane(p, true)
487 #define assert_plane_disabled(p) assert_plane(p, false)
489 static void assert_planes_disabled(struct intel_crtc *crtc)
491 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
492 struct intel_plane *plane;
494 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
495 assert_plane_disabled(plane);
498 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
504 val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
505 enabled = !!(val & TRANS_ENABLE);
506 I915_STATE_WARN(enabled,
507 "transcoder assertion failed, should be off on pipe %c but is still active\n",
511 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
512 enum pipe pipe, enum port port,
518 state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
520 I915_STATE_WARN(state && port_pipe == pipe,
521 "PCH DP %c enabled on transcoder %c, should be disabled\n",
522 port_name(port), pipe_name(pipe));
524 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
525 "IBX PCH DP %c still using transcoder B\n",
529 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
530 enum pipe pipe, enum port port,
536 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
538 I915_STATE_WARN(state && port_pipe == pipe,
539 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
540 port_name(port), pipe_name(pipe));
542 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
543 "IBX PCH HDMI %c still using transcoder B\n",
547 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
552 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
553 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
554 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
556 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
558 "PCH VGA enabled on transcoder %c, should be disabled\n",
561 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
563 "PCH LVDS enabled on transcoder %c, should be disabled\n",
566 /* PCH SDVOB multiplex with HDMIB */
567 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
568 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
569 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
572 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
573 struct intel_digital_port *dig_port,
574 unsigned int expected_mask)
579 switch (dig_port->base.port) {
581 port_mask = DPLL_PORTB_READY_MASK;
585 port_mask = DPLL_PORTC_READY_MASK;
590 port_mask = DPLL_PORTD_READY_MASK;
591 dpll_reg = DPIO_PHY_STATUS;
597 if (intel_de_wait_for_register(dev_priv, dpll_reg,
598 port_mask, expected_mask, 1000))
599 drm_WARN(&dev_priv->drm, 1,
600 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
601 dig_port->base.base.base.id, dig_port->base.base.name,
602 intel_de_read(dev_priv, dpll_reg) & port_mask,
606 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
608 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
610 enum pipe pipe = crtc->pipe;
612 u32 val, pipeconf_val;
614 /* Make sure PCH DPLL is enabled */
615 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
617 /* FDI must be feeding us bits for PCH ports */
618 assert_fdi_tx_enabled(dev_priv, pipe);
619 assert_fdi_rx_enabled(dev_priv, pipe);
621 if (HAS_PCH_CPT(dev_priv)) {
622 reg = TRANS_CHICKEN2(pipe);
623 val = intel_de_read(dev_priv, reg);
625 * Workaround: Set the timing override bit
626 * before enabling the pch transcoder.
628 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
629 /* Configure frame start delay to match the CPU */
630 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
631 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
632 intel_de_write(dev_priv, reg, val);
635 reg = PCH_TRANSCONF(pipe);
636 val = intel_de_read(dev_priv, reg);
637 pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
639 if (HAS_PCH_IBX(dev_priv)) {
640 /* Configure frame start delay to match the CPU */
641 val &= ~TRANS_FRAME_START_DELAY_MASK;
642 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
645 * Make the BPC in transcoder be consistent with
646 * that in pipeconf reg. For HDMI we must use 8bpc
647 * here for both 8bpc and 12bpc.
649 val &= ~PIPECONF_BPC_MASK;
650 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
651 val |= PIPECONF_8BPC;
653 val |= pipeconf_val & PIPECONF_BPC_MASK;
656 val &= ~TRANS_INTERLACE_MASK;
657 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
658 if (HAS_PCH_IBX(dev_priv) &&
659 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
660 val |= TRANS_LEGACY_INTERLACED_ILK;
662 val |= TRANS_INTERLACED;
664 val |= TRANS_PROGRESSIVE;
667 intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
668 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
669 drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
673 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
674 enum transcoder cpu_transcoder)
676 u32 val, pipeconf_val;
678 /* FDI must be feeding us bits for PCH ports */
679 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
680 assert_fdi_rx_enabled(dev_priv, PIPE_A);
682 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
683 /* Workaround: set timing override bit. */
684 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
685 /* Configure frame start delay to match the CPU */
686 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
687 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
688 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
691 pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
693 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
694 PIPECONF_INTERLACED_ILK)
695 val |= TRANS_INTERLACED;
697 val |= TRANS_PROGRESSIVE;
699 intel_de_write(dev_priv, LPT_TRANSCONF, val);
700 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
701 TRANS_STATE_ENABLE, 100))
702 drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
705 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
711 /* FDI relies on the transcoder */
712 assert_fdi_tx_disabled(dev_priv, pipe);
713 assert_fdi_rx_disabled(dev_priv, pipe);
715 /* Ports must be off as well */
716 assert_pch_ports_disabled(dev_priv, pipe);
718 reg = PCH_TRANSCONF(pipe);
719 val = intel_de_read(dev_priv, reg);
720 val &= ~TRANS_ENABLE;
721 intel_de_write(dev_priv, reg, val);
722 /* wait for PCH transcoder off, transcoder state */
723 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
724 drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
727 if (HAS_PCH_CPT(dev_priv)) {
728 /* Workaround: Clear the timing override chicken bit again. */
729 reg = TRANS_CHICKEN2(pipe);
730 val = intel_de_read(dev_priv, reg);
731 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
732 intel_de_write(dev_priv, reg, val);
736 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
740 val = intel_de_read(dev_priv, LPT_TRANSCONF);
741 val &= ~TRANS_ENABLE;
742 intel_de_write(dev_priv, LPT_TRANSCONF, val);
743 /* wait for PCH transcoder off, transcoder state */
744 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
745 TRANS_STATE_ENABLE, 50))
746 drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
748 /* Workaround: clear timing override bit. */
749 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
750 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
751 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
754 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
756 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
758 if (HAS_PCH_LPT(dev_priv))
764 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
766 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
767 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
768 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
769 enum pipe pipe = crtc->pipe;
773 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
775 assert_planes_disabled(crtc);
778 * A pipe without a PLL won't actually be able to drive bits from
779 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
782 if (HAS_GMCH(dev_priv)) {
783 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
784 assert_dsi_pll_enabled(dev_priv);
786 assert_pll_enabled(dev_priv, pipe);
788 if (new_crtc_state->has_pch_encoder) {
789 /* if driving the PCH, we need FDI enabled */
790 assert_fdi_rx_pll_enabled(dev_priv,
791 intel_crtc_pch_transcoder(crtc));
792 assert_fdi_tx_pll_enabled(dev_priv,
793 (enum pipe) cpu_transcoder);
795 /* FIXME: assert CPU port conditions for SNB+ */
798 reg = PIPECONF(cpu_transcoder);
799 val = intel_de_read(dev_priv, reg);
800 if (val & PIPECONF_ENABLE) {
801 /* we keep both pipes enabled on 830 */
802 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
806 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
807 intel_de_posting_read(dev_priv, reg);
810 * Until the pipe starts PIPEDSL reads will return a stale value,
811 * which causes an apparent vblank timestamp jump when PIPEDSL
812 * resets to its proper value. That also messes up the frame count
813 * when it's derived from the timestamps. So let's wait for the
814 * pipe to start properly before we call drm_crtc_vblank_on()
816 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
817 intel_wait_for_pipe_scanline_moving(crtc);
820 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
822 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
824 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
825 enum pipe pipe = crtc->pipe;
829 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
832 * Make sure planes won't keep trying to pump pixels to us,
833 * or we might hang the display.
835 assert_planes_disabled(crtc);
837 reg = PIPECONF(cpu_transcoder);
838 val = intel_de_read(dev_priv, reg);
839 if ((val & PIPECONF_ENABLE) == 0)
843 * Double wide has implications for planes
844 * so best keep it disabled when not needed.
846 if (old_crtc_state->double_wide)
847 val &= ~PIPECONF_DOUBLE_WIDE;
849 /* Don't disable pipe or pipe PLLs if needed */
850 if (!IS_I830(dev_priv))
851 val &= ~PIPECONF_ENABLE;
853 intel_de_write(dev_priv, reg, val);
854 if ((val & PIPECONF_ENABLE) == 0)
855 intel_wait_for_pipe_off(old_crtc_state);
858 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
860 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
863 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
865 if (is_ccs_modifier(fb->modifier))
866 return is_ccs_plane(fb, plane);
872 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
875 return info->is_yuv &&
876 info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
879 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
882 return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
887 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
889 struct drm_i915_private *dev_priv = to_i915(fb->dev);
890 unsigned int cpp = fb->format->cpp[color_plane];
892 switch (fb->modifier) {
893 case DRM_FORMAT_MOD_LINEAR:
894 return intel_tile_size(dev_priv);
895 case I915_FORMAT_MOD_X_TILED:
896 if (IS_GEN(dev_priv, 2))
900 case I915_FORMAT_MOD_Y_TILED_CCS:
901 if (is_ccs_plane(fb, color_plane))
904 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
905 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
906 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
907 if (is_ccs_plane(fb, color_plane))
910 case I915_FORMAT_MOD_Y_TILED:
911 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
915 case I915_FORMAT_MOD_Yf_TILED_CCS:
916 if (is_ccs_plane(fb, color_plane))
919 case I915_FORMAT_MOD_Yf_TILED:
935 MISSING_CASE(fb->modifier);
941 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
943 if (is_gen12_ccs_plane(fb, color_plane))
946 return intel_tile_size(to_i915(fb->dev)) /
947 intel_tile_width_bytes(fb, color_plane);
950 /* Return the tile dimensions in pixel units */
951 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
952 unsigned int *tile_width,
953 unsigned int *tile_height)
955 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
956 unsigned int cpp = fb->format->cpp[color_plane];
958 *tile_width = tile_width_bytes / cpp;
959 *tile_height = intel_tile_height(fb, color_plane);
962 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
965 unsigned int tile_width, tile_height;
967 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
969 return fb->pitches[color_plane] * tile_height;
973 intel_fb_align_height(const struct drm_framebuffer *fb,
974 int color_plane, unsigned int height)
976 unsigned int tile_height = intel_tile_height(fb, color_plane);
978 return ALIGN(height, tile_height);
981 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
983 unsigned int size = 0;
986 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
987 size += rot_info->plane[i].width * rot_info->plane[i].height;
992 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
994 unsigned int size = 0;
997 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
998 size += rem_info->plane[i].width * rem_info->plane[i].height;
1004 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1005 const struct drm_framebuffer *fb,
1006 unsigned int rotation)
1008 view->type = I915_GGTT_VIEW_NORMAL;
1009 if (drm_rotation_90_or_270(rotation)) {
1010 view->type = I915_GGTT_VIEW_ROTATED;
1011 view->rotated = to_intel_framebuffer(fb)->rot_info;
1015 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
1017 if (IS_I830(dev_priv))
1019 else if (IS_I85X(dev_priv))
1021 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
1027 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
1029 if (INTEL_GEN(dev_priv) >= 9)
1031 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
1032 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1034 else if (INTEL_GEN(dev_priv) >= 4)
1040 static bool has_async_flips(struct drm_i915_private *i915)
1042 return INTEL_GEN(i915) >= 5;
1045 unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
1048 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1050 /* AUX_DIST needs only 4K alignment */
1051 if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
1052 is_ccs_plane(fb, color_plane))
1055 switch (fb->modifier) {
1056 case DRM_FORMAT_MOD_LINEAR:
1057 return intel_linear_alignment(dev_priv);
1058 case I915_FORMAT_MOD_X_TILED:
1059 if (has_async_flips(dev_priv))
1062 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1063 if (is_semiplanar_uv_plane(fb, color_plane))
1064 return intel_tile_row_size(fb, color_plane);
1066 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1067 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1069 case I915_FORMAT_MOD_Y_TILED_CCS:
1070 case I915_FORMAT_MOD_Yf_TILED_CCS:
1071 case I915_FORMAT_MOD_Y_TILED:
1072 if (INTEL_GEN(dev_priv) >= 12 &&
1073 is_semiplanar_uv_plane(fb, color_plane))
1074 return intel_tile_row_size(fb, color_plane);
1076 case I915_FORMAT_MOD_Yf_TILED:
1077 return 1 * 1024 * 1024;
1079 MISSING_CASE(fb->modifier);
1084 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1086 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1087 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1089 return INTEL_GEN(dev_priv) < 4 ||
1091 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
1095 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1096 const struct i915_ggtt_view *view,
1098 unsigned long *out_flags)
1100 struct drm_device *dev = fb->dev;
1101 struct drm_i915_private *dev_priv = to_i915(dev);
1102 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1103 intel_wakeref_t wakeref;
1104 struct i915_vma *vma;
1105 unsigned int pinctl;
1108 if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1109 return ERR_PTR(-EINVAL);
1111 alignment = intel_surf_alignment(fb, 0);
1112 if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1113 return ERR_PTR(-EINVAL);
1115 /* Note that the w/a also requires 64 PTE of padding following the
1116 * bo. We currently fill all unused PTE with the shadow page and so
1117 * we should always have valid PTE following the scanout preventing
1120 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1121 alignment = 256 * 1024;
1124 * Global gtt pte registers are special registers which actually forward
1125 * writes to a chunk of system memory. Which means that there is no risk
1126 * that the register values disappear as soon as we call
1127 * intel_runtime_pm_put(), so it is correct to wrap only the
1128 * pin/unpin/fence and not more.
1130 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1132 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1135 * Valleyview is definitely limited to scanning out the first
1136 * 512MiB. Lets presume this behaviour was inherited from the
1137 * g4x display engine and that all earlier gen are similarly
1138 * limited. Testing suggests that it is a little more
1139 * complicated than this. For example, Cherryview appears quite
1140 * happy to scanout from anywhere within its global aperture.
1143 if (HAS_GMCH(dev_priv))
1144 pinctl |= PIN_MAPPABLE;
1146 vma = i915_gem_object_pin_to_display_plane(obj,
1147 alignment, view, pinctl);
1151 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1155 * Install a fence for tiled scan-out. Pre-i965 always needs a
1156 * fence, whereas 965+ only requires a fence if using
1157 * framebuffer compression. For simplicity, we always, when
1158 * possible, install a fence as the cost is not that onerous.
1160 * If we fail to fence the tiled scanout, then either the
1161 * modeset will reject the change (which is highly unlikely as
1162 * the affected systems, all but one, do not have unmappable
1163 * space) or we will not be able to enable full powersaving
1164 * techniques (also likely not to apply due to various limits
1165 * FBC and the like impose on the size of the buffer, which
1166 * presumably we violated anyway with this unmappable buffer).
1167 * Anyway, it is presumably better to stumble onwards with
1168 * something and try to run the system in a "less than optimal"
1169 * mode that matches the user configuration.
1171 ret = i915_vma_pin_fence(vma);
1172 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
1173 i915_vma_unpin(vma);
1178 if (ret == 0 && vma->fence)
1179 *out_flags |= PLANE_HAS_FENCE;
1184 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1185 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * Release a scanout pin: drop the fence first, if one was installed
 * (signalled by PLANE_HAS_FENCE in @flags), then unpin the vma itself.
 * Counterpart to the pin-and-fence path above.
 */
1189 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1191 if (flags & PLANE_HAS_FENCE)
1192 i915_vma_unpin_fence(vma);
1193 i915_vma_unpin(vma);
/*
 * Return the pitch (in bytes) to use for @color_plane: the precomputed
 * rotated pitch for 90/270 degree rotation, otherwise the normal
 * fb->pitches[] value.
 */
1197 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
1198 unsigned int rotation)
1200 if (drm_rotation_90_or_270(rotation))
1201 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
1203 return fb->pitches[color_plane];
1207 * Convert the x/y offsets into a linear offset.
1208 * Only valid with 0/180 degree rotation, which is fine since linear
1209 * offset is only used with linear buffers on pre-hsw and tiled buffers
1210 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1212 u32 intel_fb_xy_to_linear(int x, int y,
1213 const struct intel_plane_state *state,
1216 const struct drm_framebuffer *fb = state->hw.fb;
1217 unsigned int cpp = fb->format->cpp[color_plane];
1218 unsigned int pitch = state->color_plane[color_plane].stride;
/* Byte offset = whole rows plus pixels within the row. */
1220 return y * pitch + x * cpp;
1224 * Add the x/y offsets derived from fb->offsets[] to the user
1225 * specified plane src x/y offsets. The resulting x/y offsets
1226 * specify the start of scanout from the beginning of the gtt mapping.
1228 void intel_add_fb_offsets(int *x, int *y,
1229 const struct intel_plane_state *state,
/* In-place accumulation into the caller-supplied coordinates. */
1233 *x += state->color_plane[color_plane].x;
1234 *y += state->color_plane[color_plane].y;
/*
 * Compensate for moving the surface base address from @old_offset to
 * @new_offset (both tile-size aligned, new <= old) by folding the
 * difference, expressed in whole tiles, back into the x/y coordinates.
 */
1237 static u32 intel_adjust_tile_offset(int *x, int *y,
1238 unsigned int tile_width,
1239 unsigned int tile_height,
1240 unsigned int tile_size,
1241 unsigned int pitch_tiles,
1245 unsigned int pitch_pixels = pitch_tiles * tile_width;
/* Tile math below requires tile-aligned offsets. */
1248 WARN_ON(old_offset & (tile_size - 1));
1249 WARN_ON(new_offset & (tile_size - 1));
1250 WARN_ON(new_offset > old_offset);
1252 tiles = (old_offset - new_offset) / tile_size;
/* Full rows of tiles go into y, the remainder into x. */
1254 *y += tiles / pitch_tiles * tile_height;
1255 *x += tiles % pitch_tiles * tile_width;
1257 /* minimize x in case it got needlessly big */
1258 *y += *x / pitch_pixels * tile_height;
/*
 * A color plane's data is laid out linearly if the whole fb uses the
 * linear modifier, or if this is a gen12 CCS AUX plane (which is
 * stored linearly regardless of the main surface tiling).
 */
1264 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
1266 return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
1267 is_gen12_ccs_plane(fb, color_plane);
/*
 * Adjust x/y to account for rebasing the surface from @old_offset to
 * @new_offset. Tiled surfaces delegate to intel_adjust_tile_offset();
 * linear surfaces compute the new x/y directly from the byte delta.
 */
1270 static u32 intel_adjust_aligned_offset(int *x, int *y,
1271 const struct drm_framebuffer *fb,
1273 unsigned int rotation,
1275 u32 old_offset, u32 new_offset)
1277 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1278 unsigned int cpp = fb->format->cpp[color_plane];
1280 drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
1282 if (!is_surface_linear(fb, color_plane)) {
1283 unsigned int tile_size, tile_width, tile_height;
1284 unsigned int pitch_tiles;
1286 tile_size = intel_tile_size(dev_priv);
1287 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
/* For 90/270 rotation the pitch is in tile_height units. */
1289 if (drm_rotation_90_or_270(rotation)) {
1290 pitch_tiles = pitch / tile_height;
1291 swap(tile_width, tile_height);
1293 pitch_tiles = pitch / (tile_width * cpp);
1296 intel_adjust_tile_offset(x, y, tile_width, tile_height,
1297 tile_size, pitch_tiles,
1298 old_offset, new_offset);
/* Linear: fold current x/y into a byte offset, then re-split. */
1300 old_offset += *y * pitch + *x * cpp;
1302 *y = (old_offset - new_offset) / pitch;
1303 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
1310 * Adjust the tile offset by moving the difference into
/*
 * Plane-state convenience wrapper: pulls fb, rotation and stride out
 * of @state and forwards to intel_adjust_aligned_offset().
 */
1313 u32 intel_plane_adjust_aligned_offset(int *x, int *y,
1314 const struct intel_plane_state *state,
1316 u32 old_offset, u32 new_offset)
1318 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
1320 state->color_plane[color_plane].stride,
1321 old_offset, new_offset);
1325 * Computes the aligned offset to the base tile and adjusts
1326 * x, y. bytes per pixel is assumed to be a power-of-two.
1328 * In the 90/270 rotated case, x and y are assumed
1329 * to be already rotated to match the rotated GTT view, and
1330 * pitch is the tile_height aligned framebuffer height.
1332 * This function is used when computing the derived information
1333 * under intel_framebuffer, so using any of that information
1334 * here is not allowed. Anything under drm_framebuffer can be
1335 * used. This is why the user has to pass in the pitch since it
1336 * is specified in the rotated orientation.
1338 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
1340 const struct drm_framebuffer *fb,
1343 unsigned int rotation,
1346 unsigned int cpp = fb->format->cpp[color_plane];
1347 u32 offset, offset_aligned;
1349 if (!is_surface_linear(fb, color_plane)) {
1350 unsigned int tile_size, tile_width, tile_height;
1351 unsigned int tile_rows, tiles, pitch_tiles;
1353 tile_size = intel_tile_size(dev_priv);
1354 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
/* Pitch is measured in tile_heights when rotated 90/270. */
1356 if (drm_rotation_90_or_270(rotation)) {
1357 pitch_tiles = pitch / tile_height;
1358 swap(tile_width, tile_height);
1360 pitch_tiles = pitch / (tile_width * cpp);
/* Which tile the (x, y) coordinate lands in. */
1363 tile_rows = *y / tile_height;
1366 tiles = *x / tile_width;
1369 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
1371 offset_aligned = offset;
1373 offset_aligned = rounddown(offset_aligned, alignment);
/* Push the alignment remainder back into x/y. */
1375 intel_adjust_tile_offset(x, y, tile_width, tile_height,
1376 tile_size, pitch_tiles,
1377 offset, offset_aligned);
/* Linear surface: plain byte arithmetic. */
1379 offset = *y * pitch + *x * cpp;
1380 offset_aligned = offset;
1382 offset_aligned = rounddown(offset_aligned, alignment);
1383 *y = (offset % alignment) / pitch;
1384 *x = ((offset % alignment) - *y * pitch) / cpp;
1390 return offset_aligned;
/*
 * Plane-state wrapper around intel_compute_aligned_offset(): picks the
 * cursor alignment for cursor planes, the surface alignment otherwise,
 * and feeds in the plane-state stride and rotation.
 */
1393 u32 intel_plane_compute_aligned_offset(int *x, int *y,
1394 const struct intel_plane_state *state,
1397 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
1398 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
1399 const struct drm_framebuffer *fb = state->hw.fb;
1400 unsigned int rotation = state->hw.rotation;
1401 int pitch = state->color_plane[color_plane].stride;
/* Cursors have their own (typically stricter) alignment rule. */
1404 if (intel_plane->id == PLANE_CURSOR)
1405 alignment = intel_cursor_alignment(dev_priv);
1407 alignment = intel_surf_alignment(fb, color_plane);
1409 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
1410 pitch, rotation, alignment);
1413 /* Convert the fb->offset[] into x/y offsets */
/*
 * Validates the plane byte offset (alignment, overflow vs. the plane
 * size) and then converts it into x/y coordinates relative to offset 0.
 * Returns 0 on success; error paths log via drm_dbg_kms (the return of
 * the failure value is outside the sampled text — confirm in the tree).
 */
1414 static int intel_fb_offset_to_xy(int *x, int *y,
1415 const struct drm_framebuffer *fb,
1418 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1419 unsigned int height;
/* Gen12 semiplanar UV planes need tile-row alignment, other tiled
 * modifiers need tile-size alignment; linear is unconstrained. */
1422 if (INTEL_GEN(dev_priv) >= 12 &&
1423 is_semiplanar_uv_plane(fb, color_plane))
1424 alignment = intel_tile_row_size(fb, color_plane);
1425 else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
1426 alignment = intel_tile_size(dev_priv);
1430 if (alignment != 0 && fb->offsets[color_plane] % alignment) {
1431 drm_dbg_kms(&dev_priv->drm,
1432 "Misaligned offset 0x%08x for color plane %d\n",
1433 fb->offsets[color_plane], color_plane);
1437 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
1438 height = ALIGN(height, intel_tile_height(fb, color_plane));
1440 /* Catch potential overflows early */
1441 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
1442 fb->offsets[color_plane])) {
1443 drm_dbg_kms(&dev_priv->drm,
1444 "Bad offset 0x%08x or pitch %d for color plane %d\n",
1445 fb->offsets[color_plane], fb->pitches[color_plane],
/* Rebase from fb->offsets[] to offset 0, producing x/y. */
1453 intel_adjust_aligned_offset(x, y,
1454 fb, color_plane, DRM_MODE_ROTATE_0,
1455 fb->pitches[color_plane],
1456 fb->offsets[color_plane], 0);
/*
 * Map a framebuffer modifier onto the legacy object tiling mode.
 * All Y-tiled variants (plain and CCS flavors) collapse to I915_TILING_Y;
 * anything unlisted is treated as untiled.
 */
1461 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1463 switch (fb_modifier) {
1464 case I915_FORMAT_MOD_X_TILED:
1465 return I915_TILING_X;
1466 case I915_FORMAT_MOD_Y_TILED:
1467 case I915_FORMAT_MOD_Y_TILED_CCS:
1468 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1469 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1470 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1471 return I915_TILING_Y;
1473 return I915_TILING_NONE;
1478 * From the Sky Lake PRM:
1479 * "The Color Control Surface (CCS) contains the compression status of
1480 * the cache-line pairs. The compression state of the cache-line pair
1481 * is specified by 2 bits in the CCS. Each CCS cache-line represents
1482 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1483 * cache-line-pairs. CCS is always Y tiled."
1485 * Since cache line pairs refers to horizontally adjacent cache lines,
1486 * each cache line in the CCS corresponds to an area of 32x16 cache
1487 * lines on the main surface. Since each pixel is 4 bytes, this gives
1488 * us a ratio of one byte in the CCS for each 8x16 pixels in the
/* Format descriptors for SKL-era CCS: plane 0 is the 4Bpp main surface,
 * plane 1 the 1Bpp CCS with the 8x16 subsampling derived above. */
1491 static const struct drm_format_info skl_ccs_formats[] = {
1492 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1493 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1494 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1495 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1496 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1497 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1498 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1499 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1503 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1504 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1505 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1506 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
/* Gen12 RC/MC CCS formats: CCS planes are described via block_w/block_h
 * rather than hsub/vsub; planar YUV formats carry one CCS plane per
 * main plane (hence num_planes = 4 for NV12/P01x). */
1509 static const struct drm_format_info gen12_ccs_formats[] = {
1510 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1511 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1512 .hsub = 1, .vsub = 1, },
1513 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1514 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1515 .hsub = 1, .vsub = 1, },
1516 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1517 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1518 .hsub = 1, .vsub = 1, .has_alpha = true },
1519 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1520 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1521 .hsub = 1, .vsub = 1, .has_alpha = true },
1522 { .format = DRM_FORMAT_YUYV, .num_planes = 2,
1523 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1524 .hsub = 2, .vsub = 1, .is_yuv = true },
1525 { .format = DRM_FORMAT_YVYU, .num_planes = 2,
1526 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1527 .hsub = 2, .vsub = 1, .is_yuv = true },
1528 { .format = DRM_FORMAT_UYVY, .num_planes = 2,
1529 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1530 .hsub = 2, .vsub = 1, .is_yuv = true },
1531 { .format = DRM_FORMAT_VYUY, .num_planes = 2,
1532 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1533 .hsub = 2, .vsub = 1, .is_yuv = true },
1534 { .format = DRM_FORMAT_NV12, .num_planes = 4,
1535 .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1536 .hsub = 2, .vsub = 2, .is_yuv = true },
1537 { .format = DRM_FORMAT_P010, .num_planes = 4,
1538 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1539 .hsub = 2, .vsub = 2, .is_yuv = true },
1540 { .format = DRM_FORMAT_P012, .num_planes = 4,
1541 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1542 .hsub = 2, .vsub = 2, .is_yuv = true },
1543 { .format = DRM_FORMAT_P016, .num_planes = 4,
1544 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1545 .hsub = 2, .vsub = 2, .is_yuv = true },
1549 * Same as gen12_ccs_formats[] above, but with additional surface used
1550 * to pass Clear Color information in plane 2 with 64 bits of data.
/* Plane 2 has char_per_block = 0: it is driver-consumed clear-color
 * data, not a scanout surface. */
1552 static const struct drm_format_info gen12_ccs_cc_formats[] = {
1553 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
1554 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1555 .hsub = 1, .vsub = 1, },
1556 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
1557 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1558 .hsub = 1, .vsub = 1, },
1559 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
1560 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1561 .hsub = 1, .vsub = 1, .has_alpha = true },
1562 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
1563 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1564 .hsub = 1, .vsub = 1, .has_alpha = true },
/*
 * Linear search for @format in a drm_format_info table; returns the
 * matching entry (not-found handling falls outside the sampled text).
 */
1567 static const struct drm_format_info *
1568 lookup_format_info(const struct drm_format_info formats[],
1569 int num_formats, u32 format)
1573 for (i = 0; i < num_formats; i++) {
1574 if (formats[i].format == format)
/*
 * Select the driver-private format-info table appropriate for the fb's
 * modifier: SKL-style CCS, gen12 RC/MC CCS, or gen12 CCS with clear
 * color. Other modifiers fall through to the default DRM handling.
 */
1581 static const struct drm_format_info *
1582 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1584 switch (cmd->modifier[0]) {
1585 case I915_FORMAT_MOD_Y_TILED_CCS:
1586 case I915_FORMAT_MOD_Yf_TILED_CCS:
1587 return lookup_format_info(skl_ccs_formats,
1588 ARRAY_SIZE(skl_ccs_formats),
1590 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1591 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1592 return lookup_format_info(gen12_ccs_formats,
1593 ARRAY_SIZE(gen12_ccs_formats),
1595 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1596 return lookup_format_info(gen12_ccs_cc_formats,
1597 ARRAY_SIZE(gen12_ccs_cc_formats),
/*
 * Derive the gen12 CCS AUX plane stride from the corresponding main
 * plane's pitch (divisor lies outside the sampled text).
 */
1604 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1606 return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
/*
 * Maximum fb stride supported by the hardware for @pixel_format and
 * @modifier, queried from a representative primary plane.
 */
1610 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1611 u32 pixel_format, u64 modifier)
1613 struct intel_crtc *crtc;
1614 struct intel_plane *plane;
1617 * We assume the primary plane for pipe A has
1618 * the highest stride limits of them all,
1619 * if in case pipe A is disabled, use the first pipe from pipe_mask.
1621 crtc = intel_get_first_crtc(dev_priv);
1625 plane = to_intel_plane(crtc->base.primary);
1627 return plane->max_stride(plane, pixel_format, modifier,
/*
 * Upper bound on fb stride as accepted at fb creation time. For
 * non-CCS modifiers a generous fixed limit is used (remapping can fix
 * up larger strides later); CCS falls back to the true plane limit
 * since remapping is impossible with the CCS hash mode.
 */
1632 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1633 u32 pixel_format, u64 modifier)
1636 * Arbitrary limit for gen4+ chosen to match the
1637 * render engine max stride.
1639 * The new CCS hash mode makes remapping impossible
1641 if (!is_ccs_modifier(modifier)) {
1642 if (INTEL_GEN(dev_priv) >= 7)
1644 else if (INTEL_GEN(dev_priv) >= 4)
1648 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
/*
 * Required stride alignment for @color_plane, factoring in linear
 * remapping (page-aligned stride) and CCS workarounds. The exact
 * returned values on several branches lie outside the sampled text.
 */
1652 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1654 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1657 if (is_surface_linear(fb, color_plane)) {
1658 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1663 * To make remapping with linear generally feasible
1664 * we need the stride to be page aligned.
1666 if (fb->pitches[color_plane] > max_stride &&
1667 !is_ccs_modifier(fb->modifier))
1668 return intel_tile_size(dev_priv);
1673 tile_width = intel_tile_width_bytes(fb, color_plane);
1674 if (is_ccs_modifier(fb->modifier)) {
1676 * Display WA #0531: skl,bxt,kbl,glk
1678 * Render decompression and plane width > 3840
1679 * combined with horizontal panning requires the
1680 * plane stride to be a multiple of 4. We'll just
1681 * require the entire fb to accommodate that to avoid
1682 * potential runtime errors at plane configuration time.
1684 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
1687 * The main surface pitch must be padded to a multiple of four
1690 else if (INTEL_GEN(dev_priv) >= 12)
/*
 * Whether GTT remapping is usable for this plane at all: rules out
 * cursors, pre-gen4 hardware, CCS modifiers (hash mode depends on the
 * virtual address), and linear fbs whose stride is not page aligned.
 */
1696 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
1698 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1699 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1700 const struct drm_framebuffer *fb = plane_state->hw.fb;
1703 /* We don't want to deal with remapping with cursors */
1704 if (plane->id == PLANE_CURSOR)
1708 * The display engine limits already match/exceed the
1709 * render engine limits, so not much point in remapping.
1710 * Would also need to deal with the fence POT alignment
1711 * and gen2 2KiB GTT tile size.
1713 if (INTEL_GEN(dev_priv) < 4)
1717 * The new CCS hash mode isn't compatible with remapping as
1718 * the virtual address of the pages affects the compressed data.
1720 if (is_ccs_modifier(fb->modifier))
1723 /* Linear needs a page aligned stride for remapping */
1724 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
1725 unsigned int alignment = intel_tile_size(dev_priv) - 1;
1727 for (i = 0; i < fb->format->num_planes; i++) {
1728 if (fb->pitches[i] & alignment)
/*
 * Remapping is needed when it is possible (intel_plane_can_remap())
 * and the plane 0 stride exceeds the hardware max stride for this
 * format/modifier/rotation combination.
 */
1736 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
1738 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1739 const struct drm_framebuffer *fb = plane_state->hw.fb;
1740 unsigned int rotation = plane_state->hw.rotation;
1741 u32 stride, max_stride;
1744 * No remapping for invisible planes since we don't have
1745 * an actual source viewport to remap.
1747 if (!plane_state->uapi.visible)
1750 if (!intel_plane_can_remap(plane_state))
1754 * FIXME: aux plane limits on gen9+ are
1755 * unclear in Bspec, for now no checking.
1757 stride = intel_fb_pitch(fb, 0, rotation);
1758 max_stride = plane->max_stride(plane, fb->format->format,
1759 fb->modifier, rotation);
1761 return stride > max_stride;
/*
 * Return the horizontal/vertical subsampling factors for @color_plane.
 * Non-CCS planes use the format's hsub/vsub directly; gen12 CCS planes
 * derive them from the block-size ratio vs. their main plane, with a
 * correction for the main-plane-0 case described below.
 */
1765 intel_fb_plane_get_subsampling(int *hsub, int *vsub,
1766 const struct drm_framebuffer *fb,
1771 if (color_plane == 0) {
1779 * TODO: Deduct the subsampling from the char block for all CCS
1780 * formats and planes.
1782 if (!is_gen12_ccs_plane(fb, color_plane)) {
1783 *hsub = fb->format->hsub;
1784 *vsub = fb->format->vsub;
1789 main_plane = skl_ccs_to_main_plane(fb, color_plane);
1790 *hsub = drm_format_info_block_width(fb->format, color_plane) /
1791 drm_format_info_block_width(fb->format, main_plane);
1794 * The min stride check in the core framebuffer_check() function
1795 * assumes that format->hsub applies to every plane except for the
1796 * first plane. That's incorrect for the CCS AUX plane of the first
1797 * plane, but for the above check to pass we must define the block
1798 * width with that subsampling applied to it. Adjust the width here
1799 * accordingly, so we can calculate the actual subsampling factor.
1801 if (main_plane == 0)
1802 *hsub *= fb->format->hsub;
/*
 * Validate that a CCS AUX plane's intra-tile x/y offsets match those
 * of its main surface. CCS has no independent x/y offset registers,
 * so a mismatch cannot be programmed and the fb must be rejected.
 * Clear-color planes and non-CCS planes are exempt.
 */
1807 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
1809 struct drm_i915_private *i915 = to_i915(fb->dev);
1810 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1813 int tile_width, tile_height;
1817 if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
1820 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
1821 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
/* Scale the tile dims into main-surface pixel units. */
1824 tile_height *= vsub;
1826 ccs_x = (x * hsub) % tile_width;
1827 ccs_y = (y * vsub) % tile_height;
1829 main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
1830 main_x = intel_fb->normal[main_plane].x % tile_width;
1831 main_y = intel_fb->normal[main_plane].y % tile_height;
1834 * CCS doesn't have its own x/y offset register, so the intra CCS tile
1835 * x/y offsets must match between CCS and the main surface.
1837 if (main_x != ccs_x || main_y != ccs_y) {
1838 drm_dbg_kms(&i915->drm,
1839 "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
1842 intel_fb->normal[main_plane].x,
1843 intel_fb->normal[main_plane].y,
/*
 * Compute the width/height of @color_plane by applying both the main
 * plane's and this plane's subsampling to the fb dimensions (for CCS
 * planes the relevant main plane's subsampling must be applied too).
 */
1852 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
1854 int main_plane = is_ccs_plane(fb, color_plane) ?
1855 skl_ccs_to_main_plane(fb, color_plane) : 0;
1856 int main_hsub, main_vsub;
1859 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
1860 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
1861 *w = fb->width / main_hsub / hsub;
1862 *h = fb->height / main_vsub / vsub;
1866 * Setup the rotated view for an FB plane and return the size the GTT mapping
1867 * requires for this view.
1870 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
1871 u32 gtt_offset_rotated, int x, int y,
1872 unsigned int width, unsigned int height,
1873 unsigned int tile_size,
1874 unsigned int tile_width, unsigned int tile_height,
1875 struct drm_framebuffer *fb)
1877 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1878 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
1879 unsigned int pitch_tiles;
1882 /* Y or Yf modifiers required for 90/270 rotation */
1883 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
1884 fb->modifier != I915_FORMAT_MOD_Yf_TILED)
1887 if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
1890 rot_info->plane[plane] = *plane_info;
/* Rotated pitch is the plane height expressed in tile rows. */
1892 intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
1894 /* rotate the x/y offsets to match the GTT view */
1895 drm_rect_init(&r, x, y, width, height);
1897 plane_info->width * tile_width,
1898 plane_info->height * tile_height,
1899 DRM_MODE_ROTATE_270);
1903 /* rotate the tile dimensions to match the GTT view */
1904 pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
1905 swap(tile_width, tile_height);
1908 * We only keep the x/y offsets, so push all of the
1909 * gtt offset into the x/y offsets.
1911 intel_adjust_tile_offset(&x, &y,
1912 tile_width, tile_height,
1913 tile_size, pitch_tiles,
1914 gtt_offset_rotated * tile_size, 0);
1917 * First pixel of the framebuffer from
1918 * the start of the rotated gtt mapping.
1920 intel_fb->rotated[plane].x = x;
1921 intel_fb->rotated[plane].y = y;
/* Size of this plane's rotated mapping, in tiles. */
1923 return plane_info->width * plane_info->height;
/*
 * Walk every color plane of @fb, validating offsets and computing the
 * derived per-plane data cached in intel_framebuffer (normal x/y,
 * rotated-view info) plus the total GTT size, and finally check the
 * backing object is large enough. Returns 0 on success; error paths
 * log via drm_dbg_kms.
 */
1927 intel_fill_fb_info(struct drm_i915_private *dev_priv,
1928 struct drm_framebuffer *fb)
1930 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1931 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1932 u32 gtt_offset_rotated = 0;
1933 unsigned int max_size = 0;
1934 int i, num_planes = fb->format->num_planes;
1935 unsigned int tile_size = intel_tile_size(dev_priv);
1937 for (i = 0; i < num_planes; i++) {
1938 unsigned int width, height;
1939 unsigned int cpp, size;
1945 * Plane 2 of Render Compression with Clear Color fb modifier
1946 * is consumed by the driver and not passed to DE. Skip the
1947 * arithmetic related to alignment and offset calculation.
1949 if (is_gen12_ccs_cc_plane(fb, i)) {
1950 if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
1956 cpp = fb->format->cpp[i];
1957 intel_fb_plane_dims(&width, &height, fb, i);
1959 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
1961 drm_dbg_kms(&dev_priv->drm,
1962 "bad fb plane %d offset: 0x%x\n",
1967 ret = intel_fb_check_ccs_xy(fb, i, x, y);
1972 * The fence (if used) is aligned to the start of the object
1973 * so having the framebuffer wrap around across the edge of the
1974 * fenced region doesn't really work. We have no API to configure
1975 * the fence start offset within the object (nor could we probably
1976 * on gen2/3). So it's just easier if we just require that the
1977 * fb layout agrees with the fence layout. We already check that the
1978 * fb stride matches the fence stride elsewhere.
1980 if (i == 0 && i915_gem_object_is_tiled(obj) &&
1981 (x + width) * cpp > fb->pitches[i]) {
1982 drm_dbg_kms(&dev_priv->drm,
1983 "bad fb plane %d offset: 0x%x\n",
1989 * First pixel of the framebuffer from
1990 * the start of the normal gtt mapping.
1992 intel_fb->normal[i].x = x;
1993 intel_fb->normal[i].y = y;
/* Tile-aligned base offset for this plane, in tile units. */
1995 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
1999 offset /= tile_size;
2001 if (!is_surface_linear(fb, i)) {
2002 struct intel_remapped_plane_info plane_info;
2003 unsigned int tile_width, tile_height;
2005 intel_tile_dims(fb, i, &tile_width, &tile_height);
2007 plane_info.offset = offset;
2008 plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
2010 plane_info.width = DIV_ROUND_UP(x + width, tile_width);
2011 plane_info.height = DIV_ROUND_UP(y + height,
2014 /* how many tiles does this plane need */
2015 size = plane_info.stride * plane_info.height;
2017 * If the plane isn't horizontally tile aligned,
2018 * we need one more tile.
2023 gtt_offset_rotated +=
2024 setup_fb_rotation(i, &plane_info,
2026 x, y, width, height,
2028 tile_width, tile_height,
/* Linear plane: size in tiles straight from byte extent. */
2031 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2032 x * cpp, tile_size);
2035 /* how many tiles in total needed in the bo */
2036 max_size = max(max_size, offset + size);
2039 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2040 drm_dbg_kms(&dev_priv->drm,
2041 "fb too big for bo (need %llu bytes, have %zu bytes)\n",
2042 mul_u32_u32(max_size, tile_size), obj->base.size);
/*
 * Build a remapped (or rotated) GGTT view for @plane_state covering
 * only the visible src viewport, and fill in the per-plane
 * color_plane[] stride/x/y accordingly. Used when the fb stride
 * exceeds the hardware limit and must be reduced via remapping.
 */
2050 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2052 struct drm_i915_private *dev_priv =
2053 to_i915(plane_state->uapi.plane->dev);
2054 struct drm_framebuffer *fb = plane_state->hw.fb;
2055 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2056 struct intel_rotation_info *info = &plane_state->view.rotated;
2057 unsigned int rotation = plane_state->hw.rotation;
2058 int i, num_planes = fb->format->num_planes;
2059 unsigned int tile_size = intel_tile_size(dev_priv);
2060 unsigned int src_x, src_y;
2061 unsigned int src_w, src_h;
2064 memset(&plane_state->view, 0, sizeof(plane_state->view));
2065 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2066 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
/* src rectangle is in 16.16 fixed point; extract integer pixels. */
2068 src_x = plane_state->uapi.src.x1 >> 16;
2069 src_y = plane_state->uapi.src.y1 >> 16;
2070 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
2071 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
2073 drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
2075 /* Make src coordinates relative to the viewport */
2076 drm_rect_translate(&plane_state->uapi.src,
2077 -(src_x << 16), -(src_y << 16));
2079 /* Rotate src coordinates to match rotated GTT view */
2080 if (drm_rotation_90_or_270(rotation))
2081 drm_rect_rotate(&plane_state->uapi.src,
2082 src_w << 16, src_h << 16,
2083 DRM_MODE_ROTATE_270);
2085 for (i = 0; i < num_planes; i++) {
2086 unsigned int hsub = i ? fb->format->hsub : 1;
2087 unsigned int vsub = i ? fb->format->vsub : 1;
2088 unsigned int cpp = fb->format->cpp[i];
2089 unsigned int tile_width, tile_height;
2090 unsigned int width, height;
2091 unsigned int pitch_tiles;
2095 intel_tile_dims(fb, i, &tile_width, &tile_height);
/* Viewport dimensions in this plane's (subsampled) pixels. */
2099 width = src_w / hsub;
2100 height = src_h / vsub;
2103 * First pixel of the src viewport from the
2104 * start of the normal gtt mapping.
2106 x += intel_fb->normal[i].x;
2107 y += intel_fb->normal[i].y;
2109 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2110 fb, i, fb->pitches[i],
2111 DRM_MODE_ROTATE_0, tile_size);
2112 offset /= tile_size;
2114 drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
2115 info->plane[i].offset = offset;
2116 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2118 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2119 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2121 if (drm_rotation_90_or_270(rotation)) {
2124 /* rotate the x/y offsets to match the GTT view */
2125 drm_rect_init(&r, x, y, width, height);
2127 info->plane[i].width * tile_width,
2128 info->plane[i].height * tile_height,
2129 DRM_MODE_ROTATE_270);
/* Rotated view: pitch expressed in tile rows of the view. */
2133 pitch_tiles = info->plane[i].height;
2134 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2136 /* rotate the tile dimensions to match the GTT view */
2137 swap(tile_width, tile_height);
2139 pitch_tiles = info->plane[i].width;
2140 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2144 * We only keep the x/y offsets, so push all of the
2145 * gtt offset into the x/y offsets.
2147 intel_adjust_tile_offset(&x, &y,
2148 tile_width, tile_height,
2149 tile_size, pitch_tiles,
2150 gtt_offset * tile_size, 0);
/* Planes are packed back-to-back in the remapped view. */
2152 gtt_offset += info->plane[i].width * info->plane[i].height;
2154 plane_state->color_plane[i].offset = 0;
2155 plane_state->color_plane[i].x = x;
2156 plane_state->color_plane[i].y = y;
/*
 * Choose and populate the GGTT view for @plane_state: remap when
 * needed (intel_plane_needs_remap()), otherwise use the normal or
 * precomputed rotated per-plane data, then validate the stride.
 */
2161 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2163 const struct intel_framebuffer *fb =
2164 to_intel_framebuffer(plane_state->hw.fb);
2165 unsigned int rotation = plane_state->hw.rotation;
2171 num_planes = fb->base.format->num_planes;
2173 if (intel_plane_needs_remap(plane_state)) {
2174 intel_plane_remap_gtt(plane_state);
2177 * Sometimes even remapping can't overcome
2178 * the stride limitations :( Can happen with
2179 * big plane sizes and suitably misaligned
2182 return intel_plane_check_stride(plane_state);
2185 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2187 for (i = 0; i < num_planes; i++) {
2188 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2189 plane_state->color_plane[i].offset = 0;
/* Pick rotated vs normal cached x/y per the requested rotation. */
2191 if (drm_rotation_90_or_270(rotation)) {
2192 plane_state->color_plane[i].x = fb->rotated[i].x;
2193 plane_state->color_plane[i].y = fb->rotated[i].y;
2195 plane_state->color_plane[i].x = fb->normal[i].x;
2196 plane_state->color_plane[i].y = fb->normal[i].y;
2200 /* Rotate src coordinates to match rotated GTT view */
2201 if (drm_rotation_90_or_270(rotation))
2202 drm_rect_rotate(&plane_state->uapi.src,
2203 fb->base.width << 16, fb->base.height << 16,
2204 DRM_MODE_ROTATE_270);
2206 return intel_plane_check_stride(plane_state);
/*
 * Wrap the firmware/BIOS-programmed scanout range in a stolen-memory
 * GEM object and pin it at its current GGTT address (PIN_OFFSET_FIXED)
 * so the boot framebuffer can be taken over without a modeset glitch.
 * Returns the pinned vma, or drops everything on any failure.
 */
2209 static struct i915_vma *
2210 initial_plane_vma(struct drm_i915_private *i915,
2211 struct intel_initial_plane_config *plane_config)
2213 struct drm_i915_gem_object *obj;
2214 struct i915_vma *vma;
2217 if (plane_config->size == 0)
/* Expand [base, base+size) to GTT min-alignment boundaries. */
2220 base = round_down(plane_config->base,
2221 I915_GTT_MIN_ALIGNMENT);
2222 size = round_up(plane_config->base + plane_config->size,
2223 I915_GTT_MIN_ALIGNMENT);
2227 * If the FB is too big, just don't use it since fbdev is not very
2228 * important and we should probably use that space with FBC or other
2231 if (size * 2 > i915->stolen_usable_size)
2234 obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
2239 * Mark it WT ahead of time to avoid changing the
2240 * cache_level during fbdev initialization. The
2241 * unbind there would get stuck waiting for rcu.
2243 i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
2244 I915_CACHE_WT : I915_CACHE_NONE);
2246 switch (plane_config->tiling) {
2247 case I915_TILING_NONE:
2251 obj->tiling_and_stride =
2252 plane_config->fb->base.pitches[0] |
2253 plane_config->tiling;
2256 MISSING_CASE(plane_config->tiling);
2260 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
/* Must land exactly where the BIOS put it, and be mappable. */
2264 if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
2267 if (i915_gem_object_is_tiled(obj) &&
2268 !i915_vma_is_map_and_fenceable(vma))
2274 i915_gem_object_put(obj);
/*
 * Take over the firmware framebuffer for @crtc: reject unsupported
 * modifiers, pin the initial plane vma, then initialize an intel
 * framebuffer describing it from the decoded plane_config.
 */
2279 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2280 struct intel_initial_plane_config *plane_config)
2282 struct drm_device *dev = crtc->base.dev;
2283 struct drm_i915_private *dev_priv = to_i915(dev);
2284 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2285 struct drm_framebuffer *fb = &plane_config->fb->base;
2286 struct i915_vma *vma;
/* Only linear/X/Y tiling can come from the BIOS framebuffer. */
2288 switch (fb->modifier) {
2289 case DRM_FORMAT_MOD_LINEAR:
2290 case I915_FORMAT_MOD_X_TILED:
2291 case I915_FORMAT_MOD_Y_TILED:
2294 drm_dbg(&dev_priv->drm,
2295 "Unsupported modifier for initial FB: 0x%llx\n",
2300 vma = initial_plane_vma(dev_priv, plane_config);
/* Re-describe the BIOS fb as a mode_cmd for framebuffer init. */
2304 mode_cmd.pixel_format = fb->format->format;
2305 mode_cmd.width = fb->width;
2306 mode_cmd.height = fb->height;
2307 mode_cmd.pitches[0] = fb->pitches[0];
2308 mode_cmd.modifier[0] = fb->modifier;
2309 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2311 if (intel_framebuffer_init(to_intel_framebuffer(fb),
2312 vma->obj, &mode_cmd)) {
2313 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
2317 plane_config->vma = vma;
/*
 * Set a plane's uapi visibility and keep the owning crtc state's
 * plane_mask consistent: set the plane's bit when visible, clear it
 * otherwise.
 * NOTE(review): the if/else around the two mask updates is elided in
 * this extraction.
 */
2326 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2327 struct intel_plane_state *plane_state,
2330 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2332 plane_state->uapi.visible = visible;
/* visible: add the plane to the crtc's plane_mask ... */
2335 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
/* ... otherwise remove it. */
2337 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
/*
 * Rebuild enabled_planes/active_planes from the uapi plane_mask.  The
 * plane_mask uses globally unique plane ids, so it stays trustworthy even
 * when the BIOS (mis)assigned multiple primary/cursor planes to a pipe.
 */
2340 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
2342 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2343 struct drm_plane *plane;
2346 * Active_planes aliases if multiple "primary" or cursor planes
2347 * have been used on the same (or wrong) pipe. plane_mask uses
2348 * unique ids, hence we can use that to reconstruct active_planes.
2350 crtc_state->enabled_planes = 0;
2351 crtc_state->active_planes = 0;
/* Re-derive both bitmasks from the authoritative plane_mask. */
2353 drm_for_each_plane_mask(plane, &dev_priv->drm,
2354 crtc_state->uapi.plane_mask) {
2355 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
2356 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
/*
 * Disable a plane outside the atomic commit machinery (used during
 * initial hardware state takeover/sanitization).  Updates the crtc
 * state bookkeeping to match, handles the GMCH self-refresh and gen2
 * underrun-reporting quirks, then disables the plane and waits a
 * vblank for the change to latch.
 */
2360 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2361 struct intel_plane *plane)
2363 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2364 struct intel_crtc_state *crtc_state =
2365 to_intel_crtc_state(crtc->base.state);
2366 struct intel_plane_state *plane_state =
2367 to_intel_plane_state(plane->base.state);
2369 drm_dbg_kms(&dev_priv->drm,
2370 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2371 plane->base.base.id, plane->base.name,
2372 crtc->base.base.id, crtc->base.name);
/* Keep the software state consistent with the plane going away. */
2374 intel_set_plane_visible(crtc_state, plane_state, false);
2375 fixup_plane_bitmasks(crtc_state);
2376 crtc_state->data_rate[plane->id] = 0;
2377 crtc_state->min_cdclk[plane->id] = 0;
/* IPS depends on the primary plane; turn it off when that plane goes. */
2379 if (plane->id == PLANE_PRIMARY)
2380 hsw_disable_ips(crtc_state);
2383 * Vblank time updates from the shadow to live plane control register
2384 * are blocked if the memory self-refresh mode is active at that
2385 * moment. So to make sure the plane gets truly disabled, disable
2386 * first the self-refresh mode. The self-refresh enable bit in turn
2387 * will be checked/applied by the HW only at the next frame start
2388 * event which is after the vblank start event, so we need to have a
2389 * wait-for-vblank between disabling the plane and the pipe.
2391 if (HAS_GMCH(dev_priv) &&
2392 intel_set_memory_cxsr(dev_priv, false))
2393 intel_wait_for_vblank(dev_priv, crtc->pipe);
2396 * Gen2 reports pipe underruns whenever all planes are disabled.
2397 * So disable underrun reporting before all the planes get disabled.
2399 if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
2400 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
2402 intel_disable_plane(plane, crtc_state);
/* Let the disable latch at the next vblank before returning. */
2403 intel_wait_for_vblank(dev_priv, crtc->pipe);
/*
 * Locate (or reconstruct) the framebuffer the BIOS left on this crtc's
 * primary plane and wire it into the plane/crtc state, so the boot image
 * survives driver takeover.  Falls back to sharing another crtc's FB at
 * the same GGTT offset, and finally to disabling the plane entirely if
 * the BIOS FB cannot be reconstructed.
 * NOTE(review): several control-flow lines (returns, goto/labels, braces)
 * are elided in this extraction.
 */
2407 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2408 struct intel_initial_plane_config *plane_config)
2410 struct drm_device *dev = intel_crtc->base.dev;
2411 struct drm_i915_private *dev_priv = to_i915(dev);
2413 struct drm_plane *primary = intel_crtc->base.primary;
2414 struct drm_plane_state *plane_state = primary->state;
2415 struct intel_plane *intel_plane = to_intel_plane(primary);
2416 struct intel_plane_state *intel_state =
2417 to_intel_plane_state(plane_state);
2418 struct intel_crtc_state *crtc_state =
2419 to_intel_crtc_state(intel_crtc->base.state);
2420 struct drm_framebuffer *fb;
2421 struct i915_vma *vma;
/* No firmware FB recorded for this pipe - nothing to take over. */
2423 if (!plane_config->fb)
/* Preferred path: wrap the firmware FB in our own object. */
2426 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2427 fb = &plane_config->fb->base;
2428 vma = plane_config->vma;
2433 * Failed to alloc the obj, check to see if we should share
2434 * an fb with another CRTC instead
2436 for_each_crtc(dev, c) {
2437 struct intel_plane_state *state;
2439 if (c == &intel_crtc->base)
2442 if (!to_intel_crtc_state(c->state)->uapi.active)
2445 state = to_intel_plane_state(c->primary->state);
/* Another crtc scanning out of the same GGTT offset can share its FB. */
2449 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2457 * We've failed to reconstruct the BIOS FB. Current display state
2458 * indicates that the primary plane is visible, but has a NULL FB,
2459 * which will lead to problems later if we don't fix it up. The
2460 * simplest solution is to just disable the primary plane now and
2461 * pretend the BIOS never had it enabled.
2463 intel_plane_disable_noatomic(intel_crtc, intel_plane);
/* With bigjoiner the slave pipe's primary must be disabled too. */
2464 if (crtc_state->bigjoiner) {
2465 struct intel_crtc *slave =
2466 crtc_state->bigjoiner_linked_crtc;
2467 intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
/* Success path: populate the plane state from the firmware config. */
2473 intel_state->hw.rotation = plane_config->rotation;
2474 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2475 intel_state->hw.rotation);
2476 intel_state->color_plane[0].stride =
2477 intel_fb_pitch(fb, 0, intel_state->hw.rotation);
/* Keep the vma pinned and referenced by the plane state. */
2479 __i915_vma_pin(vma);
2480 intel_state->vma = i915_vma_get(vma);
2481 if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
2483 intel_state->flags |= PLANE_HAS_FENCE;
/* Full-plane src/dst rectangles (src in 16.16 fixed point). */
2485 plane_state->src_x = 0;
2486 plane_state->src_y = 0;
2487 plane_state->src_w = fb->width << 16;
2488 plane_state->src_h = fb->height << 16;
2490 plane_state->crtc_x = 0;
2491 plane_state->crtc_y = 0;
2492 plane_state->crtc_w = fb->width;
2493 plane_state->crtc_h = fb->height;
2495 intel_state->uapi.src = drm_plane_state_src(plane_state);
2496 intel_state->uapi.dst = drm_plane_state_dest(plane_state);
/* A tiled BIOS FB means we must not change the swizzle settings. */
2498 if (plane_config->tiling)
2499 dev_priv->preserve_bios_swizzle = true;
2501 plane_state->fb = fb;
2502 drm_framebuffer_get(fb);
2504 plane_state->crtc = &intel_crtc->base;
2505 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
/* Mark the frontbuffer dirty so tracking starts out consistent. */
2508 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
2510 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2511 &to_intel_frontbuffer(fb)->bits);
/*
 * Compute the y offset of the plane's first color plane within its
 * fenced view, via intel_plane_adjust_aligned_offset().
 * NOTE(review): return type, x/y locals and the return statement are
 * elided in this extraction.
 */
2515 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2519 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2520 plane_state->color_plane[0].offset, 0);
/*
 * Restore display state after a reset/suspend: re-read and set up the
 * hardware state, then commit the previously duplicated atomic @state.
 * Forces mode recalculation on every crtc since the duplicated state may
 * otherwise look "compatible" and skip needed programming.
 */
2526 __intel_display_resume(struct drm_device *dev,
2527 struct drm_atomic_state *state,
2528 struct drm_modeset_acquire_ctx *ctx)
2530 struct drm_crtc_state *crtc_state;
2531 struct drm_crtc *crtc;
2534 intel_modeset_setup_hw_state(dev, ctx);
2535 intel_vga_redisable(to_i915(dev));
2541 * We've duplicated the state, pointers to the old state are invalid.
2543 * Don't attempt to use the old state until we commit the duplicated state.
2545 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2547 * Force recalculation even if we restore
2548 * current state. With fast modeset this may not result
2549 * in a modeset when the state is compatible.
2551 crtc_state->mode_changed = true;
2554 /* ignore any reset values/BIOS leftovers in the WM registers */
2555 if (!HAS_GMCH(to_i915(dev)))
2556 to_intel_atomic_state(state)->skip_intermediate_wm = true;
2558 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
/* Caller holds all modeset locks; -EDEADLK would indicate a logic bug. */
2560 drm_WARN_ON(dev, ret == -EDEADLK);
2564 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2566 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2567 intel_has_gpu_reset(&dev_priv->gt));
/*
 * Quiesce the display before a GPU reset.  If the reset clobbers the
 * display (or the force-test parameter is set), grab all modeset locks,
 * duplicate the current atomic state for later restore, and disable all
 * crtcs.  Sets I915_RESET_MODESET to break modeset-vs-reset deadlocks;
 * intel_display_finish_reset() undoes all of this.
 */
2570 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
2572 struct drm_device *dev = &dev_priv->drm;
2573 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2574 struct drm_atomic_state *state;
2577 if (!HAS_DISPLAY(dev_priv))
2580 /* reset doesn't touch the display */
2581 if (!dev_priv->params.force_reset_modeset_test &&
2582 !gpu_reset_clobbers_display(dev_priv))
2585 /* We have a modeset vs reset deadlock, defensively unbreak it. */
2586 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2587 smp_mb__after_atomic();
2588 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
/* If a framebuffer pin is in flight we cannot wait it out; wedge instead. */
2590 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
2591 drm_dbg_kms(&dev_priv->drm,
2592 "Modeset potentially stuck, unbreaking through wedging\n");
2593 intel_gt_set_wedged(&dev_priv->gt);
2597 * Need mode_config.mutex so that we don't
2598 * trample ongoing ->detect() and whatnot.
2600 mutex_lock(&dev->mode_config.mutex);
2601 drm_modeset_acquire_init(ctx, 0);
/* Retry lock acquisition on -EDEADLK per the drm_modeset_lock protocol. */
2603 ret = drm_modeset_lock_all_ctx(dev, ctx);
2604 if (ret != -EDEADLK)
2607 drm_modeset_backoff(ctx);
2610 * Disabling the crtcs gracefully seems nicer. Also the
2611 * g33 docs say we should at least disable all the planes.
2613 state = drm_atomic_helper_duplicate_state(dev, ctx);
2614 if (IS_ERR(state)) {
2615 ret = PTR_ERR(state);
2616 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
2621 ret = drm_atomic_helper_disable_all(dev, ctx);
2623 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2625 drm_atomic_state_put(state);
/* Stash the duplicated state for intel_display_finish_reset(). */
2629 dev_priv->modeset_restore_state = state;
2630 state->acquire_ctx = ctx;
/*
 * Counterpart of intel_display_prepare_reset(): restore the duplicated
 * display state after the GPU reset.  If the reset actually clobbered the
 * display, reinitialize display hw (PPS, clock gating, HPD) before the
 * restore; then drop the modeset locks and clear I915_RESET_MODESET.
 */
2633 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
2635 struct drm_device *dev = &dev_priv->drm;
2636 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2637 struct drm_atomic_state *state;
2640 if (!HAS_DISPLAY(dev_priv))
2643 /* reset doesn't touch the display */
2644 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
/* Claim the state saved by intel_display_prepare_reset(). */
2647 state = fetch_and_zero(&dev_priv->modeset_restore_state);
2651 /* reset doesn't touch the display */
2652 if (!gpu_reset_clobbers_display(dev_priv)) {
2653 /* for testing only restore the display */
2654 ret = __intel_display_resume(dev, state, ctx);
2656 drm_err(&dev_priv->drm,
2657 "Restoring old state failed with %i\n", ret);
2660 * The display has been reset as well,
2661 * so need a full re-initialization.
2663 intel_pps_unlock_regs_wa(dev_priv);
2664 intel_modeset_init_hw(dev_priv);
2665 intel_init_clock_gating(dev_priv);
2666 intel_hpd_init(dev_priv);
2668 ret = __intel_display_resume(dev, state, ctx);
2670 drm_err(&dev_priv->drm,
2671 "Restoring old state failed with %i\n", ret);
2673 intel_hpd_poll_disable(dev_priv);
2676 drm_atomic_state_put(state);
/* Release the locks taken in intel_display_prepare_reset(). */
2678 drm_modeset_drop_locks(ctx);
2679 drm_modeset_acquire_fini(ctx);
2680 mutex_unlock(&dev->mode_config.mutex);
2682 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
/*
 * Apply icelake PIPE_CHICKEN workarounds for this crtc's pipe:
 * WA #1153 (alpha math/rounding bypass for 0x00/0xff pixels) and
 * WA #1605353570 (pixel rounding truncation for FB passthrough).
 */
2685 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
2687 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2688 enum pipe pipe = crtc->pipe;
/* Read-modify-write: preserve the other chicken bits. */
2691 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2694 * Display WA #1153: icl
2695 * enable hardware to bypass the alpha math
2696 * and rounding for per-pixel values 00 and 0xff
2698 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2700 * Display WA # 1605353570: icl
2701 * Set the pixel rounding bit to 1 for allowing
2702 * passthrough of Frame buffer pixels unmodified
2705 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2706 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
/*
 * Check whether any crtc still has a commit whose cleanup (and thus fb
 * unpin) has not completed.  Peeks at the head of each crtc's commit
 * list under commit_lock.
 * NOTE(review): the result handling after the loop (return/vblank-wait
 * condition) is partially elided in this extraction.
 */
2709 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2711 struct drm_crtc *crtc;
2714 drm_for_each_crtc(crtc, &dev_priv->drm) {
2715 struct drm_crtc_commit *commit;
2716 spin_lock(&crtc->commit_lock);
2717 commit = list_first_entry_or_null(&crtc->commit_list,
2718 struct drm_crtc_commit, commit_entry);
/* No commit pending counts as "cleanup done". */
2719 cleanup_done = commit ?
2720 try_wait_for_completion(&commit->cleanup_done) : true;
2721 spin_unlock(&crtc->commit_lock);
/* Give the pending cleanup a vblank's worth of time to finish. */
2726 drm_crtc_wait_one_vblank(crtc);
/*
 * Disable the LPT iCLKIP PCH clock: gate the pixel clock, then set the
 * SSC control disable bit through the sideband (SBI) interface.
 */
2734 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
2738 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
/* sb_lock serializes all sideband (SBI) accesses. */
2740 mutex_lock(&dev_priv->sb_lock);
2742 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2743 temp |= SBI_SSCCTL_DISABLE;
2744 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2746 mutex_unlock(&dev_priv->sb_lock);
2749 /* Program iCLKIP clock to the desired frequency */
/*
 * Derive auxdiv/divsel/phaseinc from the crtc pixel clock against the
 * 172.8 MHz iCLK virtual root, program them through the SBI sideband,
 * re-enable the SSC modulator, then ungate the pixel clock.
 */
2750 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
2752 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2754 int clock = crtc_state->hw.adjusted_mode.crtc_clock;
2755 u32 divsel, phaseinc, auxdiv, phasedir = 0;
/* Must be fully disabled before reprogramming. */
2758 lpt_disable_iclkip(dev_priv);
2760 /* The iCLK virtual clock root frequency is in MHz,
2761 * but the adjusted_mode->crtc_clock in in KHz. To get the
2762 * divisors, it is necessary to divide one by another, so we
2763 * convert the virtual clock precision to KHz here for higher
/* Try without then with the final /2 aux divider (auxdiv = 0, 1). */
2766 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
2767 u32 iclk_virtual_root_freq = 172800 * 1000;
2768 u32 iclk_pi_range = 64;
2769 u32 desired_divisor;
2771 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2773 divsel = (desired_divisor / iclk_pi_range) - 2;
2774 phaseinc = desired_divisor % iclk_pi_range;
2777 * Near 20MHz is a corner case which is
2778 * out of range for the 7-bit divisor
2784 /* This should not happen with any sane values */
2785 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2786 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2787 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
2788 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2790 drm_dbg_kms(&dev_priv->drm,
2791 "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2792 clock, auxdiv, divsel, phasedir, phaseinc);
/* All SBI register writes below are serialized by sb_lock. */
2794 mutex_lock(&dev_priv->sb_lock);
2796 /* Program SSCDIVINTPHASE6 */
2797 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2798 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2799 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2800 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2801 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2802 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2803 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2804 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2806 /* Program SSCAUXDIV */
2807 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2808 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2809 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2810 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2812 /* Enable modulator and associated divider */
2813 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2814 temp &= ~SBI_SSCCTL_DISABLE;
2815 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2817 mutex_unlock(&dev_priv->sb_lock);
2819 /* Wait for initialization time */
2822 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
/*
 * Read back the currently programmed iCLKIP frequency (in kHz) by
 * reversing lpt_program_iclkip()'s divisor math from the SBI registers.
 * Returns 0 (via the elided early-return paths) when the pixel clock is
 * gated or the SSC modulator is disabled.
 */
2825 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2827 u32 divsel, phaseinc, auxdiv;
2828 u32 iclk_virtual_root_freq = 172800 * 1000;
2829 u32 iclk_pi_range = 64;
2830 u32 desired_divisor;
/* Pixel clock gated => iCLKIP is not running. */
2833 if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2836 mutex_lock(&dev_priv->sb_lock);
2838 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2839 if (temp & SBI_SSCCTL_DISABLE) {
2840 mutex_unlock(&dev_priv->sb_lock);
2844 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2845 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2846 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2847 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2848 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2850 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2851 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2852 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2854 mutex_unlock(&dev_priv->sb_lock);
/* Inverse of the divisor decomposition in lpt_program_iclkip(). */
2856 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2858 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2859 desired_divisor << auxdiv);
/*
 * Copy the CPU transcoder's H/V timing registers verbatim into the PCH
 * transcoder, so both sides of the FDI link run identical timings.
 */
2862 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2863 enum pipe pch_transcoder)
2865 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2866 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2867 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Horizontal timings. */
2869 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2870 intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2871 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2872 intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2873 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2874 intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
/* Vertical timings. */
2876 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2877 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2878 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2879 intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2880 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2881 intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2882 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2883 intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
/*
 * Toggle FDI B/C lane bifurcation in SOUTH_CHICKEN1.  Early-out (line
 * elided) when the bit already matches @enable; warns if either FDI B
 * or C receiver is still enabled while flipping the bit.
 */
2886 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2890 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
/* Already in the requested state - nothing to do. */
2891 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
/* Bifurcation must only change while both FDI RX B/C are off. */
2894 drm_WARN_ON(&dev_priv->drm,
2895 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2897 drm_WARN_ON(&dev_priv->drm,
2898 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2901 temp &= ~FDI_BC_BIFURCATION_SELECT;
2903 temp |= FDI_BC_BIFURCATION_SELECT;
2905 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2906 enable ? "en" : "dis");
2907 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2908 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
/*
 * Decide FDI B/C bifurcation per pipe on Ivybridge: pipe B needs the
 * lanes unsplit only when using more than 2 FDI lanes; pipe C always
 * needs them split.  (Case labels/breaks are elided in this extraction.)
 */
2911 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2913 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2914 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2916 switch (crtc->pipe) {
/* Pipe B: >2 lanes needs the full B lane set; otherwise allow split. */
2920 if (crtc_state->fdi_lanes > 2)
2921 cpt_set_fdi_bc_bifurcation(dev_priv, false);
2923 cpt_set_fdi_bc_bifurcation(dev_priv, true);
/* Pipe C: always requires the bifurcated configuration. */
2927 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2936 * Finds the encoder associated with the given CRTC. This can only be
2937 * used when we know that the CRTC isn't feeding multiple encoders!
/*
 * Walks the new connector states in @state and returns the encoder whose
 * connector is bound to @crtc_state's crtc; warns unless exactly one
 * encoder was found.
 */
2939 struct intel_encoder *
2940 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2941 const struct intel_crtc_state *crtc_state)
2943 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2944 const struct drm_connector_state *connector_state;
2945 const struct drm_connector *connector;
2946 struct intel_encoder *encoder = NULL;
2947 int num_encoders = 0;
2950 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
/* Skip connectors routed to other crtcs. */
2951 if (connector_state->crtc != &crtc->base)
2954 encoder = to_intel_encoder(connector_state->best_encoder);
/* Precondition violated if the crtc feeds 0 or >1 encoders. */
2958 drm_WARN(encoder->base.dev, num_encoders != 1,
2959 "%d encoders for pipe %c\n",
2960 num_encoders, pipe_name(crtc->pipe));
2966 * Enable PCH resources required for PCH ports:
2968 * - FDI training & RX/TX
2969 * - update transcoder timings
2970 * - DP transcoding bits
/*
 * Full ILK/SNB/IVB PCH bring-up for one pipe: train the FDI link, route
 * and enable the shared DPLL, copy the transcoder timings, program
 * TRANS_DP_CTL for DP outputs, and finally enable the PCH transcoder.
 */
2973 static void ilk_pch_enable(const struct intel_atomic_state *state,
2974 const struct intel_crtc_state *crtc_state)
2976 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2977 struct drm_device *dev = crtc->base.dev;
2978 struct drm_i915_private *dev_priv = to_i915(dev);
2979 enum pipe pipe = crtc->pipe;
2982 assert_pch_transcoder_disabled(dev_priv, pipe);
/* IVB may need the FDI B/C lane split adjusted first. */
2984 if (IS_IVYBRIDGE(dev_priv))
2985 ivb_update_fdi_bc_bifurcation(crtc_state);
2987 /* Write the TU size bits before fdi link training, so that error
2988 * detection works. */
2989 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2990 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2992 /* For PCH output, training FDI link */
2993 dev_priv->display.fdi_link_train(crtc, crtc_state);
2995 /* We need to program the right clock selection before writing the pixel
2996 * mutliplier into the DPLL. */
2997 if (HAS_PCH_CPT(dev_priv)) {
3000 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3001 temp |= TRANS_DPLL_ENABLE(pipe);
3002 sel = TRANS_DPLLB_SEL(pipe);
/* Select DPLL A or B for this transcoder depending on the shared pll. */
3003 if (crtc_state->shared_dpll ==
3004 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
3008 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3011 /* XXX: pch pll's can be enabled any time before we enable the PCH
3012 * transcoder, and we actually should do this to not upset any PCH
3013 * transcoder that already use the clock when we share it.
3015 * Note that enable_shared_dpll tries to do the right thing, but
3016 * get_shared_dpll unconditionally resets the pll - we need that to have
3017 * the right LVDS enable sequence. */
3018 intel_enable_shared_dpll(crtc_state);
3020 /* set transcoder timing, panel must allow it */
3021 assert_panel_unlocked(dev_priv, pipe);
3022 ilk_pch_transcoder_set_timings(crtc_state, pipe);
3024 intel_fdi_normal_train(crtc);
3026 /* For PCH DP, enable TRANS_DP_CTL */
3027 if (HAS_PCH_CPT(dev_priv) &&
3028 intel_crtc_has_dp_encoder(crtc_state)) {
3029 const struct drm_display_mode *adjusted_mode =
3030 &crtc_state->hw.adjusted_mode;
3031 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3032 i915_reg_t reg = TRANS_DP_CTL(pipe);
3035 temp = intel_de_read(dev_priv, reg);
3036 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3037 TRANS_DP_SYNC_MASK |
3039 temp |= TRANS_DP_OUTPUT_ENABLE;
3040 temp |= bpc << 9; /* same format but at 11:9 */
/* Mirror the mode's sync polarities into the PCH DP transcoder. */
3042 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
3043 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3044 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
3045 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
/* Only PCH ports B-D can be selected here. */
3047 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
3048 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
3049 temp |= TRANS_DP_PORT_SEL(port);
3051 intel_de_write(dev_priv, reg, temp);
3054 ilk_enable_pch_transcoder(crtc_state);
/*
 * LPT (LynxPoint) PCH bring-up: program iCLKIP for the pixel clock,
 * copy transcoder timings (LPT only has transcoder A), and enable the
 * PCH transcoder.
 */
3057 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
3059 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3060 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3061 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* LPT has a single PCH transcoder, hardwired to pipe/transcoder A. */
3063 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
3065 lpt_program_iclkip(crtc_state);
3067 /* Set transcoder timing. */
3068 ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
3070 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
/*
 * Sanity-check that the pipe's scanline counter (PIPEDSL) is advancing
 * after a modeset; retries once before reporting the pipe as stuck.
 */
3073 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
3076 i915_reg_t dslreg = PIPEDSL(pipe);
3079 temp = intel_de_read(dev_priv, dslreg);
/* The scanline counter should change within 5ms on a live pipe. */
3081 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
3082 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
3083 drm_err(&dev_priv->drm,
3084 "mode set failed: pipe %c stuck\n",
/*
 * Enable the ILK-style panel fitter for this crtc, programming the
 * destination window position/size from crtc_state->pch_pfit.dst.
 * No-op (via elided return) when pch_pfit is not enabled.
 */
3089 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
3091 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3092 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3093 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
3094 enum pipe pipe = crtc->pipe;
3095 int width = drm_rect_width(dst);
3096 int height = drm_rect_height(dst);
3100 if (!crtc_state->pch_pfit.enabled)
3103 /* Force use of hard-coded filter coefficients
3104 * as some pre-programmed values are broken,
/* IVB/HSW additionally need the pipe select field in PF_CTL. */
3107 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3108 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3109 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
3111 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
/* Destination window: position in the high/low halves of each reg. */
3113 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
3114 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
/*
 * Enable IPS (Intermediate Pixel Storage) on HSW/BDW.  On Broadwell the
 * enable goes through the pcode mailbox; on Haswell IPS_CTL is written
 * directly and we wait for the enable bit to latch at the next vblank.
 * No-op (via elided return) when crtc_state->ips_enabled is false.
 */
3117 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
3119 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3120 struct drm_device *dev = crtc->base.dev;
3121 struct drm_i915_private *dev_priv = to_i915(dev);
3123 if (!crtc_state->ips_enabled)
3127 * We can only enable IPS after we enable a plane and wait for a vblank
3128 * This function is called from post_plane_update, which is run after
/* IPS requires at least one non-cursor plane to be active. */
3131 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
3133 if (IS_BROADWELL(dev_priv)) {
3134 drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
3135 IPS_ENABLE | IPS_PCODE_CONTROL));
3136 /* Quoting Art Runyan: "its not safe to expect any particular
3137 * value in IPS_CTL bit 31 after enabling IPS through the
3138 * mailbox." Moreover, the mailbox may return a bogus state,
3139 * so we need to just enable it and continue on.
3142 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE)
3143 /* The bit only becomes 1 in the next vblank, so this wait here
3144 * is essentially intel_wait_for_vblank. If we don't have this
3145 * and don't wait for vblanks until the end of crtc_enable, then
3146 * the HW state readout code will complain that the expected
3147 * IPS_CTL value is not the one we read. */
3148 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
3149 drm_err(&dev_priv->drm,
3150 "Timed out waiting for IPS enable\n");
/*
 * Disable IPS on HSW/BDW: pcode mailbox on Broadwell (with an extended
 * 100ms timeout), direct IPS_CTL write on Haswell, then wait a vblank
 * before the caller may disable the plane.
 * No-op (via elided return) when crtc_state->ips_enabled is false.
 */
3154 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
3156 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3157 struct drm_device *dev = crtc->base.dev;
3158 struct drm_i915_private *dev_priv = to_i915(dev);
3160 if (!crtc_state->ips_enabled)
3163 if (IS_BROADWELL(dev_priv)) {
3165 sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3167 * Wait for PCODE to finish disabling IPS. The BSpec specified
3168 * 42ms timeout value leads to occasional timeouts so use 100ms
3171 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
3172 drm_err(&dev_priv->drm,
3173 "Timed out waiting for IPS disable\n")
3175 intel_de_write(dev_priv, IPS_CTL, 0);
3176 intel_de_posting_read(dev_priv, IPS_CTL);
3179 /* We need to wait for a vblank before we can disable the plane. */
3180 intel_wait_for_vblank(dev_priv, crtc->pipe);
/*
 * Turn off the legacy video overlay on this crtc (if one exists);
 * userspace is expected to re-enable and reposition it afterwards.
 */
3183 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
3185 if (intel_crtc->overlay)
/* Best effort - the return value is deliberately ignored. */
3186 (void) intel_overlay_switch_off(intel_crtc->overlay);
3188 /* Let userspace switch the overlay on again. In most cases userspace
3189 * has to recompute where to put it anyway.
/*
 * Decide whether IPS must be disabled before the plane update: when IPS
 * was on and either a full modeset is needed, the HSW split-gamma LUT
 * workaround applies, or the new state no longer wants IPS.
 * (Individual return statements are elided in this extraction.)
 */
3193 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
3194 const struct intel_crtc_state *new_crtc_state)
3196 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3197 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* IPS already off - nothing to disable. */
3199 if (!old_crtc_state->ips_enabled)
3202 if (intel_crtc_needs_modeset(new_crtc_state))
3206 * Workaround : Do not read or write the pipe palette/gamma data while
3207 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3209 * Disable IPS before we program the LUT.
3211 if (IS_HASWELL(dev_priv) &&
3212 (new_crtc_state->uapi.color_mgmt_changed ||
3213 new_crtc_state->update_pipe) &&
3214 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3217 return !new_crtc_state->ips_enabled;
/*
 * Decide whether IPS must be (re)enabled after the plane update: when
 * the new state wants IPS and either a modeset ran, the HSW split-gamma
 * LUT workaround applied, the state was inherited from the BIOS (BDW
 * cannot read IPS state back), or IPS was previously off.
 * (Individual return statements are elided in this extraction.)
 */
3220 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
3221 const struct intel_crtc_state *new_crtc_state)
3223 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3224 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* New state doesn't want IPS - nothing to enable. */
3226 if (!new_crtc_state->ips_enabled)
3229 if (intel_crtc_needs_modeset(new_crtc_state))
3233 * Workaround : Do not read or write the pipe palette/gamma data while
3234 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3236 * Re-enable IPS after the LUT has been programmed.
3238 if (IS_HASWELL(dev_priv) &&
3239 (new_crtc_state->uapi.color_mgmt_changed ||
3240 new_crtc_state->update_pipe) &&
3241 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3245 * We can't read out IPS on broadwell, assume the worst and
3246 * forcibly enable IPS on the first fastset.
3248 if (new_crtc_state->update_pipe && old_crtc_state->inherited)
3251 return !old_crtc_state->ips_enabled;
/*
 * Whether this crtc state needs Display WA #0827 (applies on gen9
 * excluding Geminilake when NV12 planes are in use).
 * (The return statements are elided in this extraction.)
 */
3254 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
3256 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/* No NV12 planes => workaround not needed. */
3258 if (!crtc_state->nv12_planes)
3261 /* WA Display #0827: Gen9:all */
3262 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
/*
 * Whether this crtc state needs Wa_2006604312 (scaler clock gating
 * workaround on gen11 when any pipe scaler is in use).
 * (The return statements are elided in this extraction.)
 */
3268 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
3270 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3272 /* Wa_2006604312:icl,ehl */
3273 if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
3279 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
3280 const struct intel_crtc_state *new_crtc_state)
3282 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
3283 new_crtc_state->active_planes;
3286 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
3287 const struct intel_crtc_state *new_crtc_state)
3289 return old_crtc_state->active_planes &&
3290 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
/*
 * Per-crtc work that must run after the plane update has been committed:
 * frontbuffer flip notification, watermark update, IPS re-enable, FBC
 * post-update, and tearing down the NV12 / scaler-clock workarounds that
 * intel_pre_plane_update() armed.
 */
3293 static void intel_post_plane_update(struct intel_atomic_state *state,
3294 struct intel_crtc *crtc)
3296 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3297 const struct intel_crtc_state *old_crtc_state =
3298 intel_atomic_get_old_crtc_state(state, crtc);
3299 const struct intel_crtc_state *new_crtc_state =
3300 intel_atomic_get_new_crtc_state(state, crtc);
3301 enum pipe pipe = crtc->pipe;
3303 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
3305 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
3306 intel_update_watermarks(crtc);
3308 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
3309 hsw_enable_ips(new_crtc_state);
3311 intel_fbc_post_update(state, crtc);
/* Drop WA #0827 once NV12 planes are no longer in use. */
3313 if (needs_nv12_wa(old_crtc_state) &&
3314 !needs_nv12_wa(new_crtc_state))
3315 skl_wa_827(dev_priv, pipe, false);
/* Drop Wa_2006604312 once the scalers are no longer in use. */
3317 if (needs_scalerclk_wa(old_crtc_state) &&
3318 !needs_scalerclk_wa(new_crtc_state))
3319 icl_wa_scalerclkgating(dev_priv, pipe, false);
/*
 * Enable flip-done interrupt handling on every plane of this crtc that
 * is part of the update and supports it (used for async flips).
 */
3322 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
3323 struct intel_crtc *crtc)
3325 const struct intel_crtc_state *crtc_state =
3326 intel_atomic_get_new_crtc_state(state, crtc);
3327 u8 update_planes = crtc_state->update_planes;
3328 const struct intel_plane_state *plane_state;
3329 struct intel_plane *plane;
3332 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
/* Only planes on this pipe that are actually being updated. */
3333 if (plane->enable_flip_done &&
3334 plane->pipe == crtc->pipe &&
3335 update_planes & BIT(plane->id))
3336 plane->enable_flip_done(plane);
/*
 * Counterpart of intel_crtc_enable_flip_done(): disable flip-done
 * interrupt handling on the updated planes of this crtc.
 */
3340 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
3341 struct intel_crtc *crtc)
3343 const struct intel_crtc_state *crtc_state =
3344 intel_atomic_get_new_crtc_state(state, crtc);
3345 u8 update_planes = crtc_state->update_planes;
3346 const struct intel_plane_state *plane_state;
3347 struct intel_plane *plane;
3350 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
/* Only planes on this pipe that are actually being updated. */
3351 if (plane->disable_flip_done &&
3352 plane->pipe == crtc->pipe &&
3353 update_planes & BIT(plane->id))
3354 plane->disable_flip_done(plane);
/*
 * Workaround for leaving async-flip mode: re-write each affected
 * plane's old state with the async-flip bit cleared, then wait one
 * vblank so the hardware picks up the synchronous-flip mode before the
 * real update is committed.
 */
3358 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
3359 struct intel_crtc *crtc)
3361 struct drm_i915_private *i915 = to_i915(state->base.dev);
3362 const struct intel_crtc_state *old_crtc_state =
3363 intel_atomic_get_old_crtc_state(state, crtc);
3364 const struct intel_crtc_state *new_crtc_state =
3365 intel_atomic_get_new_crtc_state(state, crtc);
3366 u8 update_planes = new_crtc_state->update_planes;
3367 const struct intel_plane_state *old_plane_state;
3368 struct intel_plane *plane;
3369 bool need_vbl_wait = false;
3372 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3373 if (plane->need_async_flip_disable_wa &&
3374 plane->pipe == crtc->pipe &&
3375 update_planes & BIT(plane->id)) {
3377 * Apart from the async flip bit we want to
3378 * preserve the old state for the plane.
3380 plane->async_flip(plane, old_crtc_state,
3381 old_plane_state, false);
3382 need_vbl_wait = true;
/* Only wait if at least one plane needed the workaround. */
3387 intel_wait_for_vblank(i915, crtc->pipe);
/*
 * Prepare @crtc for the upcoming plane update: disable IPS/FBC where
 * required, apply pre-update display workarounds, quiesce self-refresh
 * and LP watermarks that would block the plane update, and program the
 * intermediate watermarks for non-modeset commits.
 */
3390 static void intel_pre_plane_update(struct intel_atomic_state *state,
3391 struct intel_crtc *crtc)
3393 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3394 const struct intel_crtc_state *old_crtc_state =
3395 intel_atomic_get_old_crtc_state(state, crtc);
3396 const struct intel_crtc_state *new_crtc_state =
3397 intel_atomic_get_new_crtc_state(state, crtc);
3398 enum pipe pipe = crtc->pipe;
3400 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
3401 hsw_disable_ips(old_crtc_state);
/* FBC pre-update may request a vblank wait before planes change. */
3403 if (intel_fbc_pre_update(state, crtc))
3404 intel_wait_for_vblank(dev_priv, pipe);
3406 /* Display WA 827 */
3407 if (!needs_nv12_wa(old_crtc_state) &&
3408 needs_nv12_wa(new_crtc_state))
3409 skl_wa_827(dev_priv, pipe, true);
3411 /* Wa_2006604312:icl,ehl */
3412 if (!needs_scalerclk_wa(old_crtc_state) &&
3413 needs_scalerclk_wa(new_crtc_state))
3414 icl_wa_scalerclkgating(dev_priv, pipe, true);
3417 * Vblank time updates from the shadow to live plane control register
3418 * are blocked if the memory self-refresh mode is active at that
3419 * moment. So to make sure the plane gets truly disabled, disable
3420 * first the self-refresh mode. The self-refresh enable bit in turn
3421 * will be checked/applied by the HW only at the next frame start
3422 * event which is after the vblank start event, so we need to have a
3423 * wait-for-vblank between disabling the plane and the pipe.
3425 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
3426 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
3427 intel_wait_for_vblank(dev_priv, pipe);
3430 * IVB workaround: must disable low power watermarks for at least
3431 * one frame before enabling scaling. LP watermarks can be re-enabled
3432 * when scaling is disabled.
3434 * WaCxSRDisabledForSpriteScaling:ivb
3436 if (old_crtc_state->hw.active &&
3437 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
3438 intel_wait_for_vblank(dev_priv, pipe)
3441 * If we're doing a modeset we don't need to do any
3442 * pre-vblank watermark programming here.
3444 if (!intel_crtc_needs_modeset(new_crtc_state)) {
3446 * For platforms that support atomic watermarks, program the
3447 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
3448 * will be the intermediate values that are safe for both pre- and
3449 * post- vblank; when vblank happens, the 'active' values will be set
3450 * to the final 'target' values and we'll do this again to get the
3451 * optimal watermarks. For gen9+ platforms, the values we program here
3452 * will be the final target values which will get automatically latched
3453 * at vblank time; no further programming will be necessary.
3455 * If a platform hasn't been transitioned to atomic watermarks yet,
3456 * we'll continue to update watermarks the old way, if flags tell
3459 if (dev_priv->display.initial_watermarks)
3460 dev_priv->display.initial_watermarks(state, crtc);
3461 else if (new_crtc_state->update_wm_pre)
3462 intel_update_watermarks(crtc);
3466 * Gen2 reports pipe underruns whenever all planes are disabled.
3467 * So disable underrun reporting before all the planes get disabled.
3469 * We do this after .initial_watermarks() so that we have a
3470 * chance of catching underruns with the intermediate watermarks
3471 * vs. the old plane configuration.
3473 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
3474 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3477 * WA for platforms where async address update enable bit
3478 * is double buffered and only latched at start of vblank.
3480 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
3481 intel_crtc_async_flip_disable_wa(state, crtc);
/*
 * Disable every plane of @crtc covered by the new state's update_planes
 * mask, plus the legacy overlay, and flip the frontbuffer bits of the
 * planes that were visible so frontbuffer tracking stays accurate.
 */
3484 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
3485 struct intel_crtc *crtc)
3487 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3488 const struct intel_crtc_state *new_crtc_state =
3489 intel_atomic_get_new_crtc_state(state, crtc);
3490 unsigned int update_mask = new_crtc_state->update_planes;
3491 const struct intel_plane_state *old_plane_state;
3492 struct intel_plane *plane;
3493 unsigned fb_bits = 0;
3496 intel_crtc_dpms_overlay_disable(crtc);
3498 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3499 if (crtc->pipe != plane->pipe ||
3500 !(update_mask & BIT(plane->id)))
3503 intel_disable_plane(plane, new_crtc_state);
/* Only planes that were actually visible need a frontbuffer flip. */
3505 if (old_plane_state->uapi.visible)
3506 fb_bits |= plane->frontbuffer_bit;
3509 intel_frontbuffer_flip(dev_priv, fb_bits);
3513 * intel_connector_primary_encoder - get the primary encoder for a connector
3514 * @connector: connector for which to return the encoder
3516 * Returns the primary encoder for a connector. There is a 1:1 mapping from
3517 * all connectors to their encoder, except for DP-MST connectors which have
3518 * both a virtual and a primary encoder. These DP-MST primary encoders can be
3519 * pointed to by as many DP-MST connectors as there are pipes.
3521 static struct intel_encoder *
3522 intel_connector_primary_encoder(struct intel_connector *connector)
3524 struct intel_encoder *encoder;
/* DP-MST connectors resolve to their digital port's encoder. */
3526 if (connector->mst_port)
3527 return &dp_to_dig_port(connector->mst_port)->base;
3529 encoder = intel_attached_encoder(connector);
/* Every non-MST connector is expected to have an attached encoder. */
3530 drm_WARN_ON(connector->base.dev, !encoder);
/*
 * Invoke the ->update_prepare() hook of the primary encoder behind each
 * connector that needs a modeset in @state. The crtc argument may be
 * NULL when the connector is being disabled (no new crtc).
 */
3535 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
3537 struct drm_connector_state *new_conn_state;
3538 struct drm_connector *connector;
3541 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3543 struct intel_connector *intel_connector;
3544 struct intel_encoder *encoder;
3545 struct intel_crtc *crtc;
3547 if (!intel_connector_needs_modeset(state, connector))
3550 intel_connector = to_intel_connector(connector);
3551 encoder = intel_connector_primary_encoder(intel_connector);
3552 if (!encoder->update_prepare)
3555 crtc = new_conn_state->crtc ?
3556 to_intel_crtc(new_conn_state->crtc) : NULL;
3557 encoder->update_prepare(state, encoder, crtc);
/*
 * Counterpart of intel_encoders_update_prepare(): invoke the
 * ->update_complete() hook of the primary encoder behind each connector
 * that needed a modeset in @state.
 */
3561 static void intel_encoders_update_complete(struct intel_atomic_state *state)
3563 struct drm_connector_state *new_conn_state;
3564 struct drm_connector *connector;
3567 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3569 struct intel_connector *intel_connector;
3570 struct intel_encoder *encoder;
3571 struct intel_crtc *crtc;
3573 if (!intel_connector_needs_modeset(state, connector))
3576 intel_connector = to_intel_connector(connector);
3577 encoder = intel_connector_primary_encoder(intel_connector);
3578 if (!encoder->update_complete)
3581 crtc = new_conn_state->crtc ?
3582 to_intel_crtc(new_conn_state->crtc) : NULL;
3583 encoder->update_complete(state, encoder, crtc);
/*
 * Call the ->pre_pll_enable() hook of every encoder driving @crtc in
 * the new state, before the shared DPLL is enabled.
 */
3587 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
3588 struct intel_crtc *crtc)
3590 const struct intel_crtc_state *crtc_state =
3591 intel_atomic_get_new_crtc_state(state, crtc);
3592 const struct drm_connector_state *conn_state;
3593 struct drm_connector *conn;
3596 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3597 struct intel_encoder *encoder =
3598 to_intel_encoder(conn_state->best_encoder);
3600 if (conn_state->crtc != &crtc->base)
3603 if (encoder->pre_pll_enable)
3604 encoder->pre_pll_enable(state, encoder,
3605 crtc_state, conn_state);
/*
 * Call the ->pre_enable() hook of every encoder driving @crtc in the
 * new state, before the pipe itself is enabled.
 */
3609 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
3610 struct intel_crtc *crtc)
3612 const struct intel_crtc_state *crtc_state =
3613 intel_atomic_get_new_crtc_state(state, crtc);
3614 const struct drm_connector_state *conn_state;
3615 struct drm_connector *conn;
3618 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3619 struct intel_encoder *encoder =
3620 to_intel_encoder(conn_state->best_encoder);
3622 if (conn_state->crtc != &crtc->base)
3625 if (encoder->pre_enable)
3626 encoder->pre_enable(state, encoder,
3627 crtc_state, conn_state);
/*
 * Call the ->enable() hook of every encoder driving @crtc in the new
 * state and notify the ACPI OpRegion that each encoder is now active.
 */
3631 static void intel_encoders_enable(struct intel_atomic_state *state,
3632 struct intel_crtc *crtc)
3634 const struct intel_crtc_state *crtc_state =
3635 intel_atomic_get_new_crtc_state(state, crtc);
3636 const struct drm_connector_state *conn_state;
3637 struct drm_connector *conn;
3640 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3641 struct intel_encoder *encoder =
3642 to_intel_encoder(conn_state->best_encoder);
3644 if (conn_state->crtc != &crtc->base)
3647 if (encoder->enable)
3648 encoder->enable(state, encoder,
3649 crtc_state, conn_state);
3650 intel_opregion_notify_encoder(encoder, true);
/*
 * Notify the ACPI OpRegion that each encoder of @crtc is going down and
 * call its ->disable() hook, using the old (pre-commit) state.
 */
3654 static void intel_encoders_disable(struct intel_atomic_state *state,
3655 struct intel_crtc *crtc)
3657 const struct intel_crtc_state *old_crtc_state =
3658 intel_atomic_get_old_crtc_state(state, crtc);
3659 const struct drm_connector_state *old_conn_state;
3660 struct drm_connector *conn;
3663 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3664 struct intel_encoder *encoder =
3665 to_intel_encoder(old_conn_state->best_encoder);
3667 if (old_conn_state->crtc != &crtc->base)
/* OpRegion notification happens before ->disable(), mirroring enable. */
3670 intel_opregion_notify_encoder(encoder, false);
3671 if (encoder->disable)
3672 encoder->disable(state, encoder,
3673 old_crtc_state, old_conn_state);
/*
 * Call the ->post_disable() hook of every encoder that drove @crtc in
 * the old state, after the pipe has been disabled.
 */
3677 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3678 struct intel_crtc *crtc)
3680 const struct intel_crtc_state *old_crtc_state =
3681 intel_atomic_get_old_crtc_state(state, crtc);
3682 const struct drm_connector_state *old_conn_state;
3683 struct drm_connector *conn;
3686 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3687 struct intel_encoder *encoder =
3688 to_intel_encoder(old_conn_state->best_encoder);
3690 if (old_conn_state->crtc != &crtc->base)
3693 if (encoder->post_disable)
3694 encoder->post_disable(state, encoder,
3695 old_crtc_state, old_conn_state);
/*
 * Call the ->post_pll_disable() hook of every encoder that drove @crtc
 * in the old state, after the PLL has been shut down.
 */
3699 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3700 struct intel_crtc *crtc)
3702 const struct intel_crtc_state *old_crtc_state =
3703 intel_atomic_get_old_crtc_state(state, crtc);
3704 const struct drm_connector_state *old_conn_state;
3705 struct drm_connector *conn;
3708 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3709 struct intel_encoder *encoder =
3710 to_intel_encoder(old_conn_state->best_encoder);
3712 if (old_conn_state->crtc != &crtc->base)
3715 if (encoder->post_pll_disable)
3716 encoder->post_pll_disable(state, encoder,
3717 old_crtc_state, old_conn_state);
/*
 * Call the ->update_pipe() hook of every encoder driving @crtc in the
 * new state — used for fastset updates that do not need a full modeset.
 */
3721 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3722 struct intel_crtc *crtc)
3724 const struct intel_crtc_state *crtc_state =
3725 intel_atomic_get_new_crtc_state(state, crtc);
3726 const struct drm_connector_state *conn_state;
3727 struct drm_connector *conn;
3730 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3731 struct intel_encoder *encoder =
3732 to_intel_encoder(conn_state->best_encoder);
3734 if (conn_state->crtc != &crtc->base)
3737 if (encoder->update_pipe)
3738 encoder->update_pipe(state, encoder,
3739 crtc_state, conn_state);
/*
 * Disable the primary plane of the crtc owning @crtc_state. Used during
 * crtc enable so DSPCNTR can be reprogrammed for pipe bottom color.
 */
3743 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3745 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3746 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3748 plane->disable_plane(plane, crtc_state);
/*
 * Full enable sequence for ILK-style (PCH display) CRTCs: suppress
 * spurious underruns, program transcoder timings/M-N/pipeconf, run the
 * encoder pre-enable hooks, bring up FDI/PCH where needed, load the
 * LUTs, program watermarks, enable the pipe and finally the encoders.
 */
3751 static void ilk_crtc_enable(struct intel_atomic_state *state,
3752 struct intel_crtc *crtc)
3754 const struct intel_crtc_state *new_crtc_state =
3755 intel_atomic_get_new_crtc_state(state, crtc);
3756 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3757 enum pipe pipe = crtc->pipe;
/* Enabling an already-active crtc is a driver bug. */
3759 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3763 * Sometimes spurious CPU pipe underruns happen during FDI
3764 * training, at least with VGA+HDMI cloning. Suppress them.
3766 * On ILK we get an occasional spurious CPU pipe underruns
3767 * between eDP port A enable and vdd enable. Also PCH port
3768 * enable seems to result in the occasional CPU pipe underrun.
3770 * Spurious PCH underruns also occur during PCH enabling.
3772 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3773 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3775 if (new_crtc_state->has_pch_encoder)
3776 intel_prepare_shared_dpll(new_crtc_state);
3778 if (intel_crtc_has_dp_encoder(new_crtc_state))
3779 intel_dp_set_m_n(new_crtc_state, M1_N1);
3781 intel_set_transcoder_timings(new_crtc_state);
3782 intel_set_pipe_src_size(new_crtc_state);
3784 if (new_crtc_state->has_pch_encoder)
3785 intel_cpu_transcoder_set_m_n(new_crtc_state,
3786 &new_crtc_state->fdi_m_n, NULL);
3788 ilk_set_pipeconf(new_crtc_state);
3790 crtc->active = true;
3792 intel_encoders_pre_enable(state, crtc);
3794 if (new_crtc_state->has_pch_encoder) {
3795 /* Note: FDI PLL enabling _must_ be done before we enable the
3796 * cpu pipes, hence this is separate from all the other fdi/pch
3798 ilk_fdi_pll_enable(new_crtc_state);
/* No PCH encoder: FDI must be fully off. */
3800 assert_fdi_tx_disabled(dev_priv, pipe);
3801 assert_fdi_rx_disabled(dev_priv, pipe);
3804 ilk_pfit_enable(new_crtc_state);
3807 * On ILK+ LUT must be loaded before the pipe is running but with
3810 intel_color_load_luts(new_crtc_state);
3811 intel_color_commit(new_crtc_state);
3812 /* update DSPCNTR to configure gamma for pipe bottom color */
3813 intel_disable_primary_plane(new_crtc_state);
3815 if (dev_priv->display.initial_watermarks)
3816 dev_priv->display.initial_watermarks(state, crtc);
3817 intel_enable_pipe(new_crtc_state);
3819 if (new_crtc_state->has_pch_encoder)
3820 ilk_pch_enable(state, new_crtc_state);
3822 intel_crtc_vblank_on(new_crtc_state);
3824 intel_encoders_enable(state, crtc);
3826 if (HAS_PCH_CPT(dev_priv))
3827 cpt_verify_modeset(dev_priv, pipe);
3830 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3831 * And a second vblank wait is needed at least on ILK with
3832 * some interlaced HDMI modes. Let's do the double wait always
3833 * in case there are more corner cases we don't know about.
3835 if (new_crtc_state->has_pch_encoder) {
3836 intel_wait_for_vblank(dev_priv, pipe);
3837 intel_wait_for_vblank(dev_priv, pipe);
/* Re-arm underrun reporting now that enabling has settled. */
3839 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3840 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3843 /* IPS only exists on ULT machines and is tied to pipe A. */
3844 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3846 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
/*
 * Display WA #1180 helper: read-modify-write the DPF*_GATING_DIS bits in
 * CLKGATE_DIS_PSL for @pipe. @apply presumably selects whether the mask
 * is set or cleared — the branch itself is not visible in this listing,
 * confirm against the full source.
 */
3849 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3850 enum pipe pipe, bool apply)
3852 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3853 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3860 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
/*
 * Program the MBus data box credits for @crtc's pipe. Gen12+ gets a
 * larger B/BW credit allocation than gen11.
 */
3863 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3865 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3866 enum pipe pipe = crtc->pipe;
3869 val = MBUS_DBOX_A_CREDIT(2);
3871 if (INTEL_GEN(dev_priv) >= 12) {
3872 val |= MBUS_DBOX_BW_CREDIT(2);
3873 val |= MBUS_DBOX_B_CREDIT(12);
3875 val |= MBUS_DBOX_BW_CREDIT(1);
3876 val |= MBUS_DBOX_B_CREDIT(8);
3879 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
/*
 * Write the line time and IPS line time watermarks for the crtc into
 * its WM_LINETIME register.
 */
3882 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3884 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3885 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3887 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3888 HSW_LINETIME(crtc_state->linetime) |
3889 HSW_IPS_LINETIME(crtc_state->ips_linetime));
/*
 * Program the configured frame start delay (stored 0-based in hardware,
 * hence the -1) into the transcoder's CHICKEN_TRANS register.
 */
3892 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3894 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3895 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3896 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3899 val = intel_de_read(dev_priv, reg);
3900 val &= ~HSW_FRAME_START_DELAY_MASK;
3901 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3902 intel_de_write(dev_priv, reg, val);
/*
 * Bigjoiner pre-enable: locate the master crtc (and its encoder via the
 * connector driving it), then either enable VDSC on the master (skipped
 * in the normal pre-enable path) or, when called for the slave, run the
 * master's pre-PLL/PLL/pre-enable steps and enable DSC on the slave.
 */
3905 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3906 const struct intel_crtc_state *crtc_state)
3908 struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3909 struct intel_crtc_state *master_crtc_state;
3910 struct drm_connector_state *conn_state;
3911 struct drm_connector *conn;
3912 struct intel_encoder *encoder = NULL;
/* A slave crtc's master is recorded in bigjoiner_linked_crtc. */
3915 if (crtc_state->bigjoiner_slave)
3916 master = crtc_state->bigjoiner_linked_crtc;
3918 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
/* Find the encoder attached to the master crtc. */
3920 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3921 if (conn_state->crtc != &master->base)
3924 encoder = to_intel_encoder(conn_state->best_encoder);
3928 if (!crtc_state->bigjoiner_slave) {
3929 /* need to enable VDSC, which we skipped in pre-enable */
3930 intel_dsc_enable(encoder, crtc_state);
3933 * Enable sequence steps 1-7 on bigjoiner master
3935 intel_encoders_pre_pll_enable(state, master);
3936 intel_enable_shared_dpll(master_crtc_state);
3937 intel_encoders_pre_enable(state, master);
3939 /* and DSC on slave */
3940 intel_dsc_enable(NULL, crtc_state);
/*
 * Full enable sequence for HSW+ DDI CRTCs: encoder pre-PLL/PLL/pre-
 * enable (or the bigjoiner variant), transcoder and pipe programming,
 * panel fitter, LUTs, workarounds, watermarks, MBus credits, and
 * finally the encoder enable hooks.
 */
3944 static void hsw_crtc_enable(struct intel_atomic_state *state,
3945 struct intel_crtc *crtc)
3947 const struct intel_crtc_state *new_crtc_state =
3948 intel_atomic_get_new_crtc_state(state, crtc);
3949 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3950 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3951 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3952 bool psl_clkgate_wa;
/* Enabling an already-active crtc is a driver bug. */
3954 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
/* Non-bigjoiner path: normal encoder/PLL bring-up. */
3957 if (!new_crtc_state->bigjoiner) {
3958 intel_encoders_pre_pll_enable(state, crtc);
3960 if (new_crtc_state->shared_dpll)
3961 intel_enable_shared_dpll(new_crtc_state);
3963 intel_encoders_pre_enable(state, crtc);
3965 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3968 intel_set_pipe_src_size(new_crtc_state);
3969 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3970 bdw_set_pipemisc(new_crtc_state);
/* Timings/M-N live on the transcoder; slaves and DSI skip them here. */
3972 if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3973 intel_set_transcoder_timings(new_crtc_state);
3975 if (cpu_transcoder != TRANSCODER_EDP)
3976 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3977 new_crtc_state->pixel_multiplier - 1);
3979 if (new_crtc_state->has_pch_encoder)
3980 intel_cpu_transcoder_set_m_n(new_crtc_state,
3981 &new_crtc_state->fdi_m_n, NULL);
3983 hsw_set_frame_start_delay(new_crtc_state);
3986 if (!transcoder_is_dsi(cpu_transcoder))
3987 hsw_set_pipeconf(new_crtc_state);
3989 crtc->active = true;
3991 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
3992 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
3993 new_crtc_state->pch_pfit.enabled;
3995 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
3997 if (INTEL_GEN(dev_priv) >= 9)
3998 skl_pfit_enable(new_crtc_state);
4000 ilk_pfit_enable(new_crtc_state);
4003 * On ILK+ LUT must be loaded before the pipe is running but with
4006 intel_color_load_luts(new_crtc_state);
4007 intel_color_commit(new_crtc_state);
4008 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
4009 if (INTEL_GEN(dev_priv) < 9)
4010 intel_disable_primary_plane(new_crtc_state);
4012 hsw_set_linetime_wm(new_crtc_state);
4014 if (INTEL_GEN(dev_priv) >= 11)
4015 icl_set_pipe_chicken(crtc);
4017 if (dev_priv->display.initial_watermarks)
4018 dev_priv->display.initial_watermarks(state, crtc);
4020 if (INTEL_GEN(dev_priv) >= 11)
4021 icl_pipe_mbus_enable(crtc);
/* NOTE(review): only bigjoiner slaves turn vblanks on here; the master
 * presumably gets them via the encoder enable path — confirm. */
4023 if (new_crtc_state->bigjoiner_slave)
4024 intel_crtc_vblank_on(new_crtc_state);
4026 intel_encoders_enable(state, crtc);
/* The scaler clock gating WA must stay applied until after a vblank. */
4028 if (psl_clkgate_wa) {
4029 intel_wait_for_vblank(dev_priv, pipe);
4030 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
4033 /* If we change the relative order between pipe/planes enabling, we need
4034 * to change the workaround. */
4035 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
4036 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
4037 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
4038 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
/*
 * Disable the ILK-style panel fitter for the crtc, clearing its control,
 * position and size registers — but only if it was actually enabled.
 */
4042 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4044 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4045 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4046 enum pipe pipe = crtc->pipe;
4048 /* To avoid upsetting the power well on haswell only disable the pfit if
4049 * it's in use. The hw state code will make sure we get this right. */
4050 if (!old_crtc_state->pch_pfit.enabled)
4053 intel_de_write(dev_priv, PF_CTL(pipe), 0);
4054 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
4055 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
/*
 * Disable sequence for ILK-style (PCH) CRTCs: encoders, vblanks, pipe,
 * panel fitter, FDI, then the PCH transcoder and its DP/DPLL routing,
 * with spurious underrun reporting suppressed throughout.
 */
4058 static void ilk_crtc_disable(struct intel_atomic_state *state,
4059 struct intel_crtc *crtc)
4061 const struct intel_crtc_state *old_crtc_state =
4062 intel_atomic_get_old_crtc_state(state, crtc);
4063 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4064 enum pipe pipe = crtc->pipe;
4067 * Sometimes spurious CPU pipe underruns happen when the
4068 * pipe is already disabled, but FDI RX/TX is still enabled.
4069 * Happens at least with VGA+HDMI cloning. Suppress them.
4071 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4072 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4074 intel_encoders_disable(state, crtc);
4076 intel_crtc_vblank_off(old_crtc_state);
4078 intel_disable_pipe(old_crtc_state);
4080 ilk_pfit_disable(old_crtc_state);
4082 if (old_crtc_state->has_pch_encoder)
4083 ilk_fdi_disable(crtc);
4085 intel_encoders_post_disable(state, crtc);
/* Tear down the PCH side only if this crtc actually used it. */
4087 if (old_crtc_state->has_pch_encoder) {
4088 ilk_disable_pch_transcoder(dev_priv, pipe);
4090 if (HAS_PCH_CPT(dev_priv)) {
4094 /* disable TRANS_DP_CTL */
4095 reg = TRANS_DP_CTL(pipe);
4096 temp = intel_de_read(dev_priv, reg);
4097 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4098 TRANS_DP_PORT_SEL_MASK);
4099 temp |= TRANS_DP_PORT_SEL_NONE;
4100 intel_de_write(dev_priv, reg, temp);
4102 /* disable DPLL_SEL */
4103 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
4104 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4105 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
4108 ilk_fdi_pll_disable(crtc);
/* Re-arm underrun reporting now that everything is off. */
4111 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4112 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/*
 * Disable sequence for HSW+ DDI CRTCs. Unlike ilk_crtc_disable() the
 * pipe/transcoder teardown is handled inside the encoder hooks.
 */
4115 static void hsw_crtc_disable(struct intel_atomic_state *state,
4116 struct intel_crtc *crtc)
4119 * FIXME collapse everything to one hook.
4120 * Need care with mst->ddi interactions.
4122 intel_encoders_disable(state, crtc);
4123 intel_encoders_post_disable(state, crtc);
/*
 * Enable the GMCH panel fitter with the pre-computed ratios/control
 * from @crtc_state. Must be done while the pipe is disabled.
 */
4126 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
4128 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4129 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Nothing to do if the state doesn't use the panel fitter. */
4131 if (!crtc_state->gmch_pfit.control)
4135 * The panel fitter should only be adjusted whilst the pipe is disabled,
4136 * according to register description and PRM.
4138 drm_WARN_ON(&dev_priv->drm,
4139 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
4140 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
4142 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
4143 crtc_state->gmch_pfit.pgm_ratios);
4144 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
4146 /* Border color in case we don't scale up to the full screen. Black by
4147 * default, change to something else for debugging. */
4148 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
/*
 * Return whether @phy is a combo PHY on this platform. The upper combo
 * PHY differs per platform: ADL-S up to E, DG1/RKL up to D, JSL/EHL up
 * to C, other gen11+ up to B.
 */
4151 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
4153 if (phy == PHY_NONE)
4155 else if (IS_ALDERLAKE_S(dev_priv))
4156 return phy <= PHY_E;
4157 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
4158 return phy <= PHY_D;
4159 else if (IS_JSL_EHL(dev_priv))
4160 return phy <= PHY_C;
4161 else if (INTEL_GEN(dev_priv) >= 11)
4162 return phy <= PHY_B;
/*
 * Return whether @phy is a Type-C PHY: D-I on TGL, C-F on ICL,
 * otherwise not a TC PHY.
 */
4167 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
4169 if (IS_TIGERLAKE(dev_priv))
4170 return phy >= PHY_D && phy <= PHY_I;
4171 else if (IS_ICELAKE(dev_priv))
4172 return phy >= PHY_C && phy <= PHY_F;
/*
 * Map a DDI port to its PHY. TC ports start at different PHYs per
 * platform (ADL-S: PHY_B, DG1/RKL: PHY_C); otherwise the mapping is
 * the identity from PORT_A.
 */
4177 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
4179 if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
4180 return PHY_B + port - PORT_TC1;
4181 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
4182 return PHY_C + port - PORT_TC1;
4183 else if (IS_JSL_EHL(i915) && port == PORT_D)
4186 return PHY_A + port - PORT_A;
/*
 * Map a DDI port to its Type-C port index, or TC_PORT_NONE for
 * non-TC PHYs. Gen12+ numbers TC ports from PORT_TC1, older
 * platforms from PORT_C.
 */
4189 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
4191 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
4192 return TC_PORT_NONE;
4194 if (INTEL_GEN(dev_priv) >= 12)
4195 return TC_PORT_1 + port - PORT_TC1;
4197 return TC_PORT_1 + port - PORT_C;
/*
 * Map a DDI port to the power domain covering its lanes (A-I); unknown
 * ports fall back to POWER_DOMAIN_PORT_OTHER. (Switch case labels are
 * elided in this listing.)
 */
4200 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
4204 return POWER_DOMAIN_PORT_DDI_A_LANES;
4206 return POWER_DOMAIN_PORT_DDI_B_LANES;
4208 return POWER_DOMAIN_PORT_DDI_C_LANES;
4210 return POWER_DOMAIN_PORT_DDI_D_LANES;
4212 return POWER_DOMAIN_PORT_DDI_E_LANES;
4214 return POWER_DOMAIN_PORT_DDI_F_LANES;
4216 return POWER_DOMAIN_PORT_DDI_G_LANES;
4218 return POWER_DOMAIN_PORT_DDI_H_LANES;
4220 return POWER_DOMAIN_PORT_DDI_I_LANES;
4223 return POWER_DOMAIN_PORT_OTHER;
/*
 * Return the AUX power domain for @dig_port. Type-C ports in TBT-ALT
 * mode use the dedicated *_TBT domains; everything else resolves via
 * intel_legacy_aux_to_power_domain().
 */
4227 enum intel_display_power_domain
4228 intel_aux_power_domain(struct intel_digital_port *dig_port)
4230 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
4231 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
4233 if (intel_phy_is_tc(dev_priv, phy) &&
4234 dig_port->tc_mode == TC_PORT_TBT_ALT) {
4235 switch (dig_port->aux_ch) {
4237 return POWER_DOMAIN_AUX_C_TBT;
4239 return POWER_DOMAIN_AUX_D_TBT;
4241 return POWER_DOMAIN_AUX_E_TBT;
4243 return POWER_DOMAIN_AUX_F_TBT;
4245 return POWER_DOMAIN_AUX_G_TBT;
4247 return POWER_DOMAIN_AUX_H_TBT;
4249 return POWER_DOMAIN_AUX_I_TBT;
4251 MISSING_CASE(dig_port->aux_ch);
4252 return POWER_DOMAIN_AUX_C_TBT;
4256 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
4260 * Converts aux_ch to power_domain without caring about TBT ports for that use
4261 * intel_aux_power_domain()
4263 enum intel_display_power_domain
4264 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
4268 return POWER_DOMAIN_AUX_A;
4270 return POWER_DOMAIN_AUX_B;
4272 return POWER_DOMAIN_AUX_C;
4274 return POWER_DOMAIN_AUX_D;
4276 return POWER_DOMAIN_AUX_E;
4278 return POWER_DOMAIN_AUX_F;
4280 return POWER_DOMAIN_AUX_G;
4282 return POWER_DOMAIN_AUX_H;
4284 return POWER_DOMAIN_AUX_I;
/* Unknown channels are flagged and fall back to AUX_A. */
4286 MISSING_CASE(aux_ch);
4287 return POWER_DOMAIN_AUX_A;
/*
 * Compute the bitmask of power domains an active @crtc_state needs:
 * pipe, transcoder, panel fitter, each driving encoder's domain, audio,
 * the display core (for a shared DPLL) and DSC where compression is on.
 * An inactive crtc needs none (early return elided in this listing).
 */
4291 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
4293 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4294 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4295 struct drm_encoder *encoder;
4296 enum pipe pipe = crtc->pipe;
4298 enum transcoder transcoder = crtc_state->cpu_transcoder;
4300 if (!crtc_state->hw.active)
4303 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
4304 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
4305 if (crtc_state->pch_pfit.enabled ||
4306 crtc_state->pch_pfit.force_thru)
4307 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4309 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
4310 crtc_state->uapi.encoder_mask) {
4311 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4313 mask |= BIT_ULL(intel_encoder->power_domain);
4316 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
4317 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
4319 if (crtc_state->shared_dpll)
4320 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
4322 if (crtc_state->dsc.compression_enable)
4323 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
/*
 * Diff the crtc's required power domains against those already held and
 * grab references for the newly needed ones; the caller releases the
 * stale old_domains set after the modeset.
 */
4329 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
4331 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4332 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4333 enum intel_display_power_domain domain;
4334 u64 domains, new_domains, old_domains;
4336 domains = get_crtc_power_domains(crtc_state);
/* new = needed but not held; old = held but no longer needed. */
4338 new_domains = domains & ~crtc->enabled_power_domains.mask;
4339 old_domains = crtc->enabled_power_domains.mask & ~domains;
4341 for_each_power_domain(domain, new_domains)
4342 intel_display_power_get_in_set(dev_priv,
4343 &crtc->enabled_power_domains,
/* Release the crtc's power-domain references given in @domains. */
4349 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
4352 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
4353 &crtc->enabled_power_domains,
/*
 * Enable sequence for VLV/CHV CRTCs: transcoder timings, pipeconf,
 * platform-specific DPLL bring-up, panel fitter, LUTs, watermarks,
 * pipe and encoders.
 */
4357 static void valleyview_crtc_enable(struct intel_atomic_state *state,
4358 struct intel_crtc *crtc)
4360 const struct intel_crtc_state *new_crtc_state =
4361 intel_atomic_get_new_crtc_state(state, crtc);
4362 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4363 enum pipe pipe = crtc->pipe;
/* Enabling an already-active crtc is a driver bug. */
4365 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
4368 if (intel_crtc_has_dp_encoder(new_crtc_state))
4369 intel_dp_set_m_n(new_crtc_state, M1_N1);
4371 intel_set_transcoder_timings(new_crtc_state);
4372 intel_set_pipe_src_size(new_crtc_state);
4374 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
4375 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
4376 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
4379 i9xx_set_pipeconf(new_crtc_state);
4381 crtc->active = true;
4383 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4385 intel_encoders_pre_pll_enable(state, crtc);
/* CHV and VLV use different DPLL programming paths. */
4387 if (IS_CHERRYVIEW(dev_priv)) {
4388 chv_prepare_pll(crtc, new_crtc_state);
4389 chv_enable_pll(crtc, new_crtc_state);
4391 vlv_prepare_pll(crtc, new_crtc_state);
4392 vlv_enable_pll(crtc, new_crtc_state);
4395 intel_encoders_pre_enable(state, crtc);
4397 i9xx_pfit_enable(new_crtc_state);
4399 intel_color_load_luts(new_crtc_state);
4400 intel_color_commit(new_crtc_state);
4401 /* update DSPCNTR to configure gamma for pipe bottom color */
4402 intel_disable_primary_plane(new_crtc_state);
/* Called unconditionally — presumably VLV/CHV always provide the
 * initial_watermarks hook, unlike the i9xx path; confirm. */
4404 dev_priv->display.initial_watermarks(state, crtc);
4405 intel_enable_pipe(new_crtc_state);
4407 intel_crtc_vblank_on(new_crtc_state);
4409 intel_encoders_enable(state, crtc);
/* Write the precomputed FP0/FP1 PLL divider values for the crtc. */
4412 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
4414 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4415 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4417 intel_de_write(dev_priv, FP0(crtc->pipe),
4418 crtc_state->dpll_hw_state.fp0);
4419 intel_de_write(dev_priv, FP1(crtc->pipe),
4420 crtc_state->dpll_hw_state.fp1);
/*
 * Enable sequence for legacy i9xx CRTCs: PLL dividers, transcoder
 * timings, pipeconf, PLL, panel fitter, LUTs, watermarks, pipe and
 * encoders, with gen2-specific underrun handling.
 */
4423 static void i9xx_crtc_enable(struct intel_atomic_state *state,
4424 struct intel_crtc *crtc)
4426 const struct intel_crtc_state *new_crtc_state =
4427 intel_atomic_get_new_crtc_state(state, crtc);
4428 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4429 enum pipe pipe = crtc->pipe;
/* Enabling an already-active crtc is a driver bug. */
4431 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
4434 i9xx_set_pll_dividers(new_crtc_state);
4436 if (intel_crtc_has_dp_encoder(new_crtc_state))
4437 intel_dp_set_m_n(new_crtc_state, M1_N1);
4439 intel_set_transcoder_timings(new_crtc_state);
4440 intel_set_pipe_src_size(new_crtc_state);
4442 i9xx_set_pipeconf(new_crtc_state);
4444 crtc->active = true;
/* Gen2 underrun reporting stays off here (see the tail of this
 * function) to avoid spurious reports while planes are off. */
4446 if (!IS_GEN(dev_priv, 2))
4447 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4449 intel_encoders_pre_enable(state, crtc);
4451 i9xx_enable_pll(crtc, new_crtc_state);
4453 i9xx_pfit_enable(new_crtc_state);
4455 intel_color_load_luts(new_crtc_state);
4456 intel_color_commit(new_crtc_state);
4457 /* update DSPCNTR to configure gamma for pipe bottom color */
4458 intel_disable_primary_plane(new_crtc_state);
4460 if (dev_priv->display.initial_watermarks)
4461 dev_priv->display.initial_watermarks(state, crtc);
4463 intel_update_watermarks(crtc);
4464 intel_enable_pipe(new_crtc_state);
4466 intel_crtc_vblank_on(new_crtc_state);
4468 intel_encoders_enable(state, crtc);
4470 /* prevents spurious underruns */
4471 if (IS_GEN(dev_priv, 2))
4472 intel_wait_for_vblank(dev_priv, pipe);
/*
 * Disable the gmch panel fitter, if it was in use by this crtc.
 * Must only be done while the pipe is disabled (asserted below).
 */
4475 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4477 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4478 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Nothing to do if the pfit was not enabled for this crtc. */
4480 if (!old_crtc_state->gmch_pfit.control)
4483 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
4485 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
4486 intel_de_read(dev_priv, PFIT_CONTROL));
4487 intel_de_write(dev_priv, PFIT_CONTROL, 0);
/*
 * Disable a crtc on gmch platforms, tearing down in roughly the reverse
 * order of i9xx_crtc_enable(): encoders, pipe, pfit, then the PLL.
 */
4490 static void i9xx_crtc_disable(struct intel_atomic_state *state,
4491 struct intel_crtc *crtc)
4493 struct intel_crtc_state *old_crtc_state =
4494 intel_atomic_get_old_crtc_state(state, crtc);
4495 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4496 enum pipe pipe = crtc->pipe;
4499 * On gen2 planes are double buffered but the pipe isn't, so we must
4500 * wait for planes to fully turn off before disabling the pipe.
4502 if (IS_GEN(dev_priv, 2))
4503 intel_wait_for_vblank(dev_priv, pipe);
4505 intel_encoders_disable(state, crtc);
4507 intel_crtc_vblank_off(old_crtc_state);
4509 intel_disable_pipe(old_crtc_state);
4511 i9xx_pfit_disable(old_crtc_state);
4513 intel_encoders_post_disable(state, crtc);
/* DSI drives the pipe without the DPLL, so leave the PLL alone then. */
4515 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
4516 if (IS_CHERRYVIEW(dev_priv))
4517 chv_disable_pll(dev_priv, pipe);
4518 else if (IS_VALLEYVIEW(dev_priv))
4519 vlv_disable_pll(dev_priv, pipe);
4521 i9xx_disable_pll(old_crtc_state);
4524 intel_encoders_post_pll_disable(state, crtc);
/* Gen2 has no FIFO underrun reporting to turn off. */
4526 if (!IS_GEN(dev_priv, 2))
4527 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/* Legacy watermark update for platforms without ->initial_watermarks. */
4529 if (!dev_priv->display.initial_watermarks)
4530 intel_update_watermarks(crtc);
4532 /* clock the pipe down to 640x480@60 to potentially save power */
4533 if (IS_I830(dev_priv))
4534 i830_enable_pipe(dev_priv, pipe);
/*
 * Force-disable a crtc outside the normal atomic commit path (used when
 * sanitizing inherited/bogus hw state). Disables the planes and the crtc
 * via a throwaway atomic state, then scrubs all the software bookkeeping
 * (crtc state, encoder links, watermarks, power domains, cdclk/dbuf/bw
 * per-pipe tracking) to match the now-disabled hardware.
 */
4537 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
4538 struct drm_modeset_acquire_ctx *ctx)
4540 struct intel_encoder *encoder;
4541 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4542 struct intel_bw_state *bw_state =
4543 to_intel_bw_state(dev_priv->bw_obj.state);
4544 struct intel_cdclk_state *cdclk_state =
4545 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
4546 struct intel_dbuf_state *dbuf_state =
4547 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
4548 struct intel_crtc_state *crtc_state =
4549 to_intel_crtc_state(crtc->base.state);
4550 struct intel_plane *plane;
4551 struct drm_atomic_state *state;
4552 struct intel_crtc_state *temp_crtc_state;
4553 enum pipe pipe = crtc->pipe;
4556 if (!crtc_state->hw.active)
/* Turn off every plane that is currently visible on this crtc. */
4559 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
4560 const struct intel_plane_state *plane_state =
4561 to_intel_plane_state(plane->base.state);
4563 if (plane_state->uapi.visible)
4564 intel_plane_disable_noatomic(crtc, plane);
/* Build a minimal atomic state just to drive ->crtc_disable(). */
4567 state = drm_atomic_state_alloc(&dev_priv->drm);
4569 drm_dbg_kms(&dev_priv->drm,
4570 "failed to disable [CRTC:%d:%s], out of memory",
4571 crtc->base.base.id, crtc->base.name);
4575 state->acquire_ctx = ctx;
4577 /* Everything's already locked, -EDEADLK can't happen. */
4578 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
4579 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
4581 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
4583 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
4585 drm_atomic_state_put(state);
4587 drm_dbg_kms(&dev_priv->drm,
4588 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
4589 crtc->base.base.id, crtc->base.name);
/* Sync software crtc/encoder tracking to the disabled hardware. */
4591 crtc->active = false;
4592 crtc->base.enabled = false;
4594 drm_WARN_ON(&dev_priv->drm,
4595 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
4596 crtc_state->uapi.active = false;
4597 crtc_state->uapi.connector_mask = 0;
4598 crtc_state->uapi.encoder_mask = 0;
4599 intel_crtc_free_hw_state(crtc_state);
4600 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
4602 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
4603 encoder->base.crtc = NULL;
4605 intel_fbc_disable(crtc);
4606 intel_update_watermarks(crtc);
4607 intel_disable_shared_dpll(crtc_state);
/* Drop every power domain reference this crtc was holding. */
4609 intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
/* Clear this pipe out of the global cdclk/dbuf/bandwidth bookkeeping. */
4611 dev_priv->active_pipes &= ~BIT(pipe);
4612 cdclk_state->min_cdclk[pipe] = 0;
4613 cdclk_state->min_voltage_level[pipe] = 0;
4614 cdclk_state->active_pipes &= ~BIT(pipe);
4616 dbuf_state->active_pipes &= ~BIT(pipe);
4618 bw_state->data_rate[pipe] = 0;
4619 bw_state->num_active_planes[pipe] = 0;
4623 * turn all crtc's off, but do not adjust state
4624 * This has to be paired with a call to intel_modeset_setup_hw_state.
/*
 * Suspend all crtcs via the atomic helper and stash the resulting state
 * in modeset_restore_state so intel_modeset_setup_hw_state() can restore
 * it on resume. Returns 0 or the error from drm_atomic_helper_suspend().
 */
4626 int intel_display_suspend(struct drm_device *dev)
4628 struct drm_i915_private *dev_priv = to_i915(dev);
4629 struct drm_atomic_state *state;
4632 state = drm_atomic_helper_suspend(dev);
4633 ret = PTR_ERR_OR_ZERO(state);
4635 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4638 dev_priv->modeset_restore_state = state;
/*
 * Generic encoder destroy callback: clean up the drm core state and free
 * the containing intel_encoder allocation.
 */
4642 void intel_encoder_destroy(struct drm_encoder *encoder)
4644 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4646 drm_encoder_cleanup(encoder);
4647 kfree(intel_encoder);
4650 /* Cross check the actual hw state with our own modeset state tracking (and its
4651 * internal consistency). */
/*
 * State checker: verify that a connector's software state (attached crtc,
 * best_encoder, active flags) agrees with the hw state reported by
 * connector->get_hw_state(). Mismatches are reported via I915_STATE_WARN.
 */
4652 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
4653 struct drm_connector_state *conn_state)
4655 struct intel_connector *connector = to_intel_connector(conn_state->connector);
4656 struct drm_i915_private *i915 = to_i915(connector->base.dev);
4658 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
4659 connector->base.base.id, connector->base.name);
4661 if (connector->get_hw_state(connector)) {
4662 struct intel_encoder *encoder = intel_attached_encoder(connector);
4664 I915_STATE_WARN(!crtc_state,
4665 "connector enabled without attached crtc\n");
4670 I915_STATE_WARN(!crtc_state->hw.active,
4671 "connector is active, but attached crtc isn't\n");
/* MST encoders are shared between streams; skip the 1:1 checks. */
4673 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
4676 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
4677 "atomic encoder doesn't match attached encoder\n");
4679 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
4680 "attached encoder crtc differs from connector crtc\n");
/* Connector reported off: crtc must be inactive and no encoder set. */
4682 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
4683 "attached crtc is active, but connector isn't\n");
4684 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
4685 "best encoder set without crtc!\n");
/*
 * Return whether this crtc state could use IPS (Intermediate Pixel
 * Storage): requires an IPS-capable crtc, the enable_ips modparam,
 * <= 24 bpp, and (on BDW) pixel rate headroom vs. max cdclk.
 */
4689 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4691 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4692 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4694 /* IPS only exists on ULT machines and is tied to pipe A. */
4695 if (!hsw_crtc_supports_ips(crtc))
4698 if (!dev_priv->params.enable_ips)
4701 if (crtc_state->pipe_bpp > 24)
4705 * We compare against max which means we must take
4706 * the increased cdclk requirement into account when
4707 * calculating the new cdclk.
4709 * Should measure whether using a lower cdclk w/o IPS
4711 if (IS_BROADWELL(dev_priv) &&
4712 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
/*
 * Decide whether IPS should be enabled for this crtc state. Starts from
 * disabled and only turns it on when the crtc is IPS-capable, CRC capture
 * is off, at least one non-cursor plane is active, and (on BDW) the pixel
 * rate fits within 95% of the logical cdclk. Returns 0 or a negative
 * error from the cdclk state lookup.
 */
4718 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
4720 struct drm_i915_private *dev_priv =
4721 to_i915(crtc_state->uapi.crtc->dev);
4722 struct intel_atomic_state *state =
4723 to_intel_atomic_state(crtc_state->uapi.state);
4725 crtc_state->ips_enabled = false;
4727 if (!hsw_crtc_state_ips_capable(crtc_state))
4731 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4732 * enabled and disabled dynamically based on package C states,
4733 * user space can't make reliable use of the CRCs, so let's just
4734 * completely disable it.
4736 if (crtc_state->crc_enabled)
4739 /* IPS should be fine as long as at least one plane is enabled. */
4740 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
4743 if (IS_BROADWELL(dev_priv)) {
4744 const struct intel_cdclk_state *cdclk_state;
4746 cdclk_state = intel_atomic_get_cdclk_state(state);
4747 if (IS_ERR(cdclk_state))
4748 return PTR_ERR(cdclk_state);
4750 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
4751 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
4755 crtc_state->ips_enabled = true;
/*
 * Double wide pipe mode is a pre-gen4 feature; on most platforms only
 * pipe A supports it (GDG/i915g allows either pipe).
 */
4760 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4762 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4764 /* GDG double wide on either pipe, otherwise pipe A only */
4765 return INTEL_GEN(dev_priv) < 4 &&
4766 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
/*
 * Compute the effective pipe pixel rate on ILK+ by scaling the crtc clock
 * by the panel fitter downscaling ratio (when the pfit is enabled and
 * actually downscaling).
 */
4769 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4771 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4772 unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
4775 * We only use IF-ID interlacing. If we ever use
4776 * PF-ID we'll need to adjust the pixel_rate here.
4779 if (!crtc_state->pch_pfit.enabled)
4782 pipe_w = crtc_state->pipe_src_w;
4783 pipe_h = crtc_state->pipe_src_h;
4785 pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
4786 pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
/* Upscaling does not increase the pixel rate; clamp to source size. */
4788 if (pipe_w < pfit_w)
4790 if (pipe_h < pfit_h)
/* Guard against a zero-sized pfit destination (would divide by zero). */
4793 if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
4794 !pfit_w || !pfit_h))
4797 return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
/*
 * Populate the user-visible fields of @mode from the crtc_* (hardware)
 * timing fields of @timings, mark it as a driver mode and (re)generate
 * its name. @mode and @timings may alias.
 */
4801 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4802 const struct drm_display_mode *timings)
4804 mode->hdisplay = timings->crtc_hdisplay;
4805 mode->htotal = timings->crtc_htotal;
4806 mode->hsync_start = timings->crtc_hsync_start;
4807 mode->hsync_end = timings->crtc_hsync_end;
4809 mode->vdisplay = timings->crtc_vdisplay;
4810 mode->vtotal = timings->crtc_vtotal;
4811 mode->vsync_start = timings->crtc_vsync_start;
4812 mode->vsync_end = timings->crtc_vsync_end;
4814 mode->flags = timings->flags;
4815 mode->type = DRM_MODE_TYPE_DRIVER;
4817 mode->clock = timings->crtc_clock;
4819 drm_mode_set_name(mode);
/*
 * Fill in crtc_state->pixel_rate: raw pipe clock on GMCH platforms,
 * pfit-adjusted rate (ilk_pipe_pixel_rate) otherwise.
 */
4822 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4824 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4826 if (HAS_GMCH(dev_priv))
4827 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
4828 crtc_state->pixel_rate =
4829 crtc_state->hw.pipe_mode.crtc_clock;
4831 crtc_state->pixel_rate =
4832 ilk_pipe_pixel_rate(crtc_state);
/*
 * Derive pipe_mode, the user-visible mode and pixel_rate from the
 * adjusted_mode read out of the hardware, undoing the bigjoiner
 * halving and eDP MSO splitter segmentation of the transcoder timings.
 */
4835 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4837 struct drm_display_mode *mode = &crtc_state->hw.mode;
4838 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4839 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4841 drm_mode_copy(pipe_mode, adjusted_mode);
4843 if (crtc_state->bigjoiner) {
4845 * transcoder is programmed to the full mode,
4846 * but pipe timings are half of the transcoder mode
4848 pipe_mode->crtc_hdisplay /= 2;
4849 pipe_mode->crtc_hblank_start /= 2;
4850 pipe_mode->crtc_hblank_end /= 2;
4851 pipe_mode->crtc_hsync_start /= 2;
4852 pipe_mode->crtc_hsync_end /= 2;
4853 pipe_mode->crtc_htotal /= 2;
4854 pipe_mode->crtc_clock /= 2;
4857 if (crtc_state->splitter.enable) {
4858 int n = crtc_state->splitter.link_count;
4859 int overlap = crtc_state->splitter.pixel_overlap;
4862 * eDP MSO uses segment timings from EDID for transcoder
4863 * timings, but full mode for everything else.
4865 * h_full = (h_segment - pixel_overlap) * link_count
4867 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4868 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4869 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4870 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4871 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4872 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4873 pipe_mode->crtc_clock *= n;
/* Splitter case: adjusted_mode takes the expanded pipe timings too. */
4875 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4876 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
/* Non-splitter case: each mode is finalized from its own timings. */
4878 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4879 intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4882 intel_crtc_compute_pixel_rate(crtc_state);
4884 drm_mode_copy(mode, adjusted_mode);
/* With bigjoiner the full user mode spans both pipes (doubled width). */
4885 mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4886 mode->vdisplay = crtc_state->pipe_src_h;
/*
 * Read out the encoder's hw config into @crtc_state, then recompute the
 * derived modes/pixel rate from the freshly read adjusted_mode.
 */
4889 static void intel_encoder_get_config(struct intel_encoder *encoder,
4890 struct intel_crtc_state *crtc_state)
4892 encoder->get_config(encoder, crtc_state);
4894 intel_crtc_readout_derived_state(crtc_state);
/*
 * Validate and adjust the crtc configuration during atomic check:
 * derive pipe_mode from adjusted_mode (bigjoiner halving, MSO splitter
 * expansion), enforce the platform dot clock limit (possibly enabling
 * double wide mode on pre-gen4), reject odd source widths where the hw
 * can't handle them, apply the zero-hsync-front-porch workaround, and
 * compute the pixel rate / FDI config. Returns 0 or a negative errno.
 */
4897 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4898 struct intel_crtc_state *pipe_config)
4900 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4901 struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4902 int clock_limit = dev_priv->max_dotclk_freq;
4904 drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4906 /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
4907 if (pipe_config->bigjoiner) {
4908 pipe_mode->crtc_clock /= 2;
4909 pipe_mode->crtc_hdisplay /= 2;
4910 pipe_mode->crtc_hblank_start /= 2;
4911 pipe_mode->crtc_hblank_end /= 2;
4912 pipe_mode->crtc_hsync_start /= 2;
4913 pipe_mode->crtc_hsync_end /= 2;
4914 pipe_mode->crtc_htotal /= 2;
4915 pipe_config->pipe_src_w /= 2;
/* eDP MSO: expand segment timings to the full (joined) mode. */
4918 if (pipe_config->splitter.enable) {
4919 int n = pipe_config->splitter.link_count;
4920 int overlap = pipe_config->splitter.pixel_overlap;
4922 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4923 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4924 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4925 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4926 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4927 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4928 pipe_mode->crtc_clock *= n;
4931 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4933 if (INTEL_GEN(dev_priv) < 4) {
4934 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4937 * Enable double wide mode when the dot clock
4938 * is > 90% of the (display) core speed.
4940 if (intel_crtc_supports_double_wide(crtc) &&
4941 pipe_mode->crtc_clock > clock_limit) {
4942 clock_limit = dev_priv->max_dotclk_freq;
4943 pipe_config->double_wide = true;
4947 if (pipe_mode->crtc_clock > clock_limit) {
4948 drm_dbg_kms(&dev_priv->drm,
4949 "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4950 pipe_mode->crtc_clock, clock_limit,
4951 yesno(pipe_config->double_wide));
4956 * Pipe horizontal size must be even in:
4958 * - LVDS dual channel mode
4959 * - Double wide pipe
4961 if (pipe_config->pipe_src_w & 1) {
4962 if (pipe_config->double_wide) {
4963 drm_dbg_kms(&dev_priv->drm,
4964 "Odd pipe source width not supported with double wide pipe\n");
4968 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4969 intel_is_dual_link_lvds(dev_priv)) {
4970 drm_dbg_kms(&dev_priv->drm,
4971 "Odd pipe source width not supported with dual link LVDS\n");
4976 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
4977 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4979 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4980 pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4983 intel_crtc_compute_pixel_rate(pipe_config);
4985 if (pipe_config->has_pch_encoder)
4986 return ilk_fdi_compute_config(crtc, pipe_config);
/*
 * Scale down the M/N pair (preserving the ratio) until both values fit
 * in the DATA_LINK_M_N_MASK register field width.
 */
4992 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4994 while (*num > DATA_LINK_M_N_MASK ||
4995 *den > DATA_LINK_M_N_MASK) {
/*
 * Compute a hardware M/N divider pair for the ratio m/n. With constant_n
 * the N value is fixed at DP_LINK_CONSTANT_N_VALUE (0x8000) for dongle
 * compatibility; otherwise N is rounded up to a power of two capped at
 * DATA_LINK_N_MAX, and the pair is reduced to fit the register fields.
 */
5001 static void compute_m_n(unsigned int m, unsigned int n,
5002 u32 *ret_m, u32 *ret_n,
5006 * Several DP dongles in particular seem to be fussy about
5007 * too large link M/N values. Give N value as 0x8000 that
5008 * should be acceptable by specific devices. 0x8000 is the
5009 * specified fixed N value for asynchronous clock mode,
5010 * which the devices expect also in synchronous clock mode.
5013 *ret_n = DP_LINK_CONSTANT_N_VALUE;
5015 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5017 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
5018 intel_reduce_m_n_ratio(ret_m, ret_n);
/*
 * Fill in the GMCH (data) and link M/N values for a DP link: data M/N is
 * the ratio of the stream data rate (bpp * pixel clock, FEC-adjusted when
 * enabled) to the total link bandwidth, link M/N the ratio of pixel clock
 * to link clock.
 */
5022 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
5023 int pixel_clock, int link_clock,
5024 struct intel_link_m_n *m_n,
5025 bool constant_n, bool fec_enable)
5027 u32 data_clock = bits_per_pixel * pixel_clock;
/* FEC adds overhead to the effective data rate. */
5030 data_clock = intel_dp_mode_to_fec_clock(data_clock);
5033 compute_m_n(data_clock,
5034 link_clock * nlanes * 8,
5035 &m_n->gmch_m, &m_n->gmch_n,
5038 compute_m_n(pixel_clock, link_clock,
5039 &m_n->link_m, &m_n->link_n,
/*
 * On IBX/CPT PCH platforms, override the VBT's LVDS SSC setting with
 * whatever the BIOS actually programmed, to avoid flicker from toggling
 * SSC on a running panel.
 */
5043 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
5046 * There may be no VBT; and if the BIOS enabled SSC we can
5047 * just keep using it to avoid unnecessary flicker. Whereas if the
5048 * BIOS isn't using it, don't assume it will work even if the VBT
5049 * indicates as much.
5051 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5052 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
5056 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
5057 drm_dbg_kms(&dev_priv->drm,
5058 "SSC %s by BIOS, overriding VBT which says %s\n",
5059 enableddisabled(bios_lvds_use_ssc),
5060 enableddisabled(dev_priv->vbt.lvds_use_ssc));
5061 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
/*
 * Program the PCH transcoder's data (M1/N1) and link (M1/N1) registers
 * for this crtc's pipe from the given M/N values.
 */
5066 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
5067 const struct intel_link_m_n *m_n)
5069 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5070 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5071 enum pipe pipe = crtc->pipe;
5073 intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
5074 TU_SIZE(m_n->tu) | m_n->gmch_m);
5075 intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5076 intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5077 intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
/*
 * Whether this transcoder exposes the second (M2/N2) divider register
 * set used for DRRS: HSW has it only on the eDP transcoder; otherwise
 * gen7 and CHV.
 */
5080 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
5081 enum transcoder transcoder)
5083 if (IS_HASWELL(dev_priv))
5084 return transcoder == TRANSCODER_EDP;
5087 * Strictly speaking some registers are available before
5088 * gen7, but we only support DRRS on gen7+
5090 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * per-transcoder M1/N1 registers are used (plus M2/N2 when DRRS is
 * supported and @m2_n2 is given); older gmch platforms use the per-pipe
 * G4X-style registers.
 */
5093 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
5094 const struct intel_link_m_n *m_n,
5095 const struct intel_link_m_n *m2_n2)
5097 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5098 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5099 enum pipe pipe = crtc->pipe;
5100 enum transcoder transcoder = crtc_state->cpu_transcoder;
5102 if (INTEL_GEN(dev_priv) >= 5) {
5103 intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
5104 TU_SIZE(m_n->tu) | m_n->gmch_m);
5105 intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
5107 intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
5109 intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
5112 * M2_N2 registers are set only if DRRS is supported
5113 * (to make sure the registers are not unnecessarily accessed).
5115 if (m2_n2 && crtc_state->has_drrs &&
5116 transcoder_has_m2_n2(dev_priv, transcoder)) {
5117 intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
5118 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
5119 intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
5121 intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
5123 intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
/* Pre-gen5: per-pipe G4X-style M/N registers. */
5127 intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
5128 TU_SIZE(m_n->tu) | m_n->gmch_m);
5129 intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5130 intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
5131 intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into the
 * PCH or CPU transcoder. When the hw lacks M2_N2 registers, the m2_n2
 * values are written into the M1/N1 slot instead.
 */
5135 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
5137 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
5138 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
5141 dp_m_n = &crtc_state->dp_m_n;
5142 dp_m2_n2 = &crtc_state->dp_m2_n2;
5143 } else if (m_n == M2_N2) {
5146 * M2_N2 registers are not supported. Hence m2_n2 divider value
5147 * needs to be programmed into M1_N1.
5149 dp_m_n = &crtc_state->dp_m2_n2;
5151 drm_err(&i915->drm, "Unsupported divider value\n");
5155 if (crtc_state->has_pch_encoder)
5156 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
5158 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
/*
 * Program the transcoder timing registers (H/V total, blank, sync, and
 * VSYNCSHIFT) from the adjusted mode, applying the interlace halfline
 * adjustments, plus the HSW EDP-input VTOTAL workaround.
 */
5161 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
5163 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5164 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5165 enum pipe pipe = crtc->pipe;
5166 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5167 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
5168 u32 crtc_vtotal, crtc_vblank_end;
5171 /* We need to be careful not to changed the adjusted mode, for otherwise
5172 * the hw state checker will get angry at the mismatch. */
5173 crtc_vtotal = adjusted_mode->crtc_vtotal;
5174 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5176 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5177 /* the chip adds 2 halflines automatically */
5179 crtc_vblank_end -= 1;
5181 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5182 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5184 vsyncshift = adjusted_mode->crtc_hsync_start -
5185 adjusted_mode->crtc_htotal / 2;
5187 vsyncshift += adjusted_mode->crtc_htotal;
/* VSYNCSHIFT only exists on gen4+. */
5190 if (INTEL_GEN(dev_priv) > 3)
5191 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
/* Registers hold value-1, with the "total" field in the high 16 bits. */
5194 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
5195 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
5196 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
5197 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
5198 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
5199 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
5201 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
5202 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
5203 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
5204 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
5205 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
5206 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
5208 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5209 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5210 * documented on the DDI_FUNC_CTL register description, EDP Input Select
5212 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
5213 (pipe == PIPE_B || pipe == PIPE_C))
5214 intel_de_write(dev_priv, VTOTAL(pipe),
5215 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
/*
 * Program PIPESRC with the pipe source size (width in the high 16 bits,
 * height in the low 16, both stored as value-1).
 */
5219 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
5221 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5222 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5223 enum pipe pipe = crtc->pipe;
5225 /* pipesrc controls the size that is scaled from, which should
5226 * always be the user's requested size.
5228 intel_de_write(dev_priv, PIPESRC(pipe),
5229 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
/*
 * Read PIPECONF to determine whether the pipe is running interlaced.
 * Gen2 has no interlace support; HSW/BDW/gen9+ use a different mask.
 */
5232 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
5234 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5235 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5237 if (IS_GEN(dev_priv, 2))
5240 if (INTEL_GEN(dev_priv) >= 9 ||
5241 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
5242 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
5244 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
/*
 * Read the transcoder timing registers back into adjusted_mode. The
 * registers store value-1 (hence the +1s); DSI transcoders expose only
 * HTOTAL/VTOTAL. Interlaced pipes get the halfline adjustments undone.
 */
5247 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
5248 struct intel_crtc_state *pipe_config)
5250 struct drm_device *dev = crtc->base.dev;
5251 struct drm_i915_private *dev_priv = to_i915(dev);
5252 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5255 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
5256 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5257 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5259 if (!transcoder_is_dsi(cpu_transcoder)) {
5260 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
5261 pipe_config->hw.adjusted_mode.crtc_hblank_start =
5263 pipe_config->hw.adjusted_mode.crtc_hblank_end =
5264 ((tmp >> 16) & 0xffff) + 1;
5266 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
5267 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5268 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5270 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
5271 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5272 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5274 if (!transcoder_is_dsi(cpu_transcoder)) {
5275 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
5276 pipe_config->hw.adjusted_mode.crtc_vblank_start =
5278 pipe_config->hw.adjusted_mode.crtc_vblank_end =
5279 ((tmp >> 16) & 0xffff) + 1;
5281 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
5282 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5283 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
/* Undo the -1 halfline adjustment made when programming interlaced. */
5285 if (intel_pipe_is_interlaced(pipe_config)) {
5286 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5287 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
5288 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
/*
 * Read the pipe source size back from PIPESRC (width in the high 16
 * bits, height in the low 16, stored as value-1).
 */
5292 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
5293 struct intel_crtc_state *pipe_config)
5295 struct drm_device *dev = crtc->base.dev;
5296 struct drm_i915_private *dev_priv = to_i915(dev);
5299 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
5300 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5301 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
/*
 * Build and write the PIPECONF value for gmch platforms: double wide,
 * bpc/dither (g4x+ only), interlace mode, limited color range (VLV/CHV),
 * gamma mode and frame start delay.
 */
5304 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
5306 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5312 /* we keep both pipes enabled on 830 */
5313 if (IS_I830(dev_priv))
5314 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
5316 if (crtc_state->double_wide)
5317 pipeconf |= PIPECONF_DOUBLE_WIDE;
5319 /* only g4x and later have fancy bpc/dither controls */
5320 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5321 IS_CHERRYVIEW(dev_priv)) {
5322 /* Bspec claims that we can't use dithering for 30bpp pipes. */
5323 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
5324 pipeconf |= PIPECONF_DITHER_EN |
5325 PIPECONF_DITHER_TYPE_SP;
5327 switch (crtc_state->pipe_bpp) {
5329 pipeconf |= PIPECONF_6BPC;
5332 pipeconf |= PIPECONF_8BPC;
5335 pipeconf |= PIPECONF_10BPC;
5338 /* Case prevented by intel_choose_pipe_bpp_dither. */
5343 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
5344 if (INTEL_GEN(dev_priv) < 4 ||
5345 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5346 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5348 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
5350 pipeconf |= PIPECONF_PROGRESSIVE;
5353 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5354 crtc_state->limited_color_range)
5355 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5357 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5359 pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5361 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
5362 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
/*
 * Whether the platform has a gmch panel fitter: gen4+, Pineview and
 * mobile parts — but never i830.
 */
5365 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
5367 if (IS_I830(dev_priv))
5370 return INTEL_GEN(dev_priv) >= 4 ||
5371 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
/*
 * Read back the gmch panel fitter configuration, if the pfit exists, is
 * enabled, and is attached to this crtc's pipe.
 */
5374 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
5376 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5377 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5380 if (!i9xx_has_pfit(dev_priv))
5383 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
5384 if (!(tmp & PFIT_ENABLE))
5387 /* Check whether the pfit is attached to our pipe. */
5388 if (INTEL_GEN(dev_priv) < 4) {
/* Pre-gen4 the pfit is hardwired to pipe B. */
5389 if (crtc->pipe != PIPE_B)
5392 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5396 crtc_state->gmch_pfit.control = tmp;
5397 crtc_state->gmch_pfit.pgm_ratios =
5398 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
/*
 * Read the VLV DPLL divider fields from the DPIO PLL register and
 * compute the port clock (100 MHz reference). Skipped for DSI, where
 * the DPLL is not used.
 */
5401 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5402 struct intel_crtc_state *pipe_config)
5404 struct drm_device *dev = crtc->base.dev;
5405 struct drm_i915_private *dev_priv = to_i915(dev);
5406 enum pipe pipe = crtc->pipe;
5409 int refclk = 100000;
5411 /* In case of DSI, DPLL will not be used */
5412 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5415 vlv_dpio_get(dev_priv);
5416 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
5417 vlv_dpio_put(dev_priv);
5419 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5420 clock.m2 = mdiv & DPIO_M2DIV_MASK;
5421 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5422 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5423 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5425 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
/*
 * Read the CHV DPLL divider fields (spread across several DPIO words,
 * including the fractional M2 part) and compute the port clock
 * (100 MHz reference). Skipped for DSI, where the DPLL is not used.
 */
5428 static void chv_crtc_clock_get(struct intel_crtc *crtc,
5429 struct intel_crtc_state *pipe_config)
5431 struct drm_device *dev = crtc->base.dev;
5432 struct drm_i915_private *dev_priv = to_i915(dev);
5433 enum pipe pipe = crtc->pipe;
5434 enum dpio_channel port = vlv_pipe_to_channel(pipe);
5436 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
5437 int refclk = 100000;
5439 /* In case of DSI, DPLL will not be used */
5440 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5443 vlv_dpio_get(dev_priv);
5444 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
5445 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
5446 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
5447 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
5448 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
5449 vlv_dpio_put(dev_priv);
5451 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
/* M2 is a fixed-point value: integer part in DW0, fraction in DW2. */
5452 clock.m2 = (pll_dw0 & 0xff) << 22;
5453 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
5454 clock.m2 |= pll_dw2 & 0x3fffff;
5455 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
5456 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
5457 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
5459 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
/*
 * Decode the pipe output format (RGB / YCbCr 4:4:4 / YCbCr 4:2:0) from
 * the PIPEMISC register on BDW+.
 */
5462 static enum intel_output_format
5463 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
5465 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5468 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5470 if (tmp & PIPEMISC_YUV420_ENABLE) {
5471 /* We support 4:2:0 in full blend mode only */
5472 drm_WARN_ON(&dev_priv->drm,
5473 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
5475 return INTEL_OUTPUT_FORMAT_YCBCR420;
5476 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
5477 return INTEL_OUTPUT_FORMAT_YCBCR444;
5479 return INTEL_OUTPUT_FORMAT_RGB;
/*
 * Read the pipe gamma/CSC enable bits back from the primary plane's
 * DSPCNTR register (where gmch-era hardware keeps them).
 */
5483 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
5485 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5486 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5487 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5488 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
5491 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
5493 if (tmp & DISPPLANE_GAMMA_ENABLE)
5494 crtc_state->gamma_enable = true;
/* The pipe CSC enable bit only exists on non-GMCH platforms. */
5496 if (!HAS_GMCH(dev_priv) &&
5497 tmp & DISPPLANE_PIPE_CSC_ENABLE)
5498 crtc_state->csc_enable = true;
/*
 * Read out the current hardware state of a pipe on GMCH-era (gen2-4,
 * VLV/CHV) platforms into @pipe_config: PIPECONF bits (bpc, color range,
 * gamma mode), transcoder timings, pfit, DPLL registers and the derived
 * pixel clock.  Takes a pipe power-domain wakeref first and returns
 * whether the pipe was found enabled.  Several lines (returns, breaks,
 * closing braces) are elided in this extract; code kept byte-identical.
 */
5501 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5502 struct intel_crtc_state *pipe_config)
5504 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5505 enum intel_display_power_domain power_domain;
5506 intel_wakeref_t wakeref;
5510 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5511 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5515 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5516 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5517 pipe_config->shared_dpll = NULL;
5521 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5522 if (!(tmp & PIPECONF_ENABLE))
5525 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5526 IS_CHERRYVIEW(dev_priv)) {
5527 switch (tmp & PIPECONF_BPC_MASK) {
5529 pipe_config->pipe_bpp = 18;
5532 pipe_config->pipe_bpp = 24;
5534 case PIPECONF_10BPC:
5535 pipe_config->pipe_bpp = 30;
5542 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5543 (tmp & PIPECONF_COLOR_RANGE_SELECT))
5544 pipe_config->limited_color_range = true;
5546 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
5547 PIPECONF_GAMMA_MODE_SHIFT;
5549 if (IS_CHERRYVIEW(dev_priv))
5550 pipe_config->cgm_mode = intel_de_read(dev_priv,
5551 CGM_PIPE_MODE(crtc->pipe));
5553 i9xx_get_pipe_color_config(pipe_config);
5554 intel_color_get_config(pipe_config);
5556 if (INTEL_GEN(dev_priv) < 4)
5557 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5559 intel_get_transcoder_timings(crtc, pipe_config);
5560 intel_get_pipe_src_size(crtc, pipe_config);
5562 i9xx_get_pfit_config(pipe_config);
5564 if (INTEL_GEN(dev_priv) >= 4) {
5565 /* No way to read it out on pipes B and C */
5566 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
5567 tmp = dev_priv->chv_dpll_md[crtc->pipe];
5569 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
5570 pipe_config->pixel_multiplier =
5571 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5572 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5573 pipe_config->dpll_hw_state.dpll_md = tmp;
5574 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5575 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
5576 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
5577 pipe_config->pixel_multiplier =
5578 ((tmp & SDVO_MULTIPLIER_MASK)
5579 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5581 /* Note that on i915G/GM the pixel multiplier is in the sdvo
5582 * port and will be fixed up in the encoder->get_config
5584 pipe_config->pixel_multiplier = 1;
5586 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
5588 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
5589 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
5591 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
5594 /* Mask out read-only status bits. */
5595 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5596 DPLL_PORTC_READY_MASK |
5597 DPLL_PORTB_READY_MASK);
5600 if (IS_CHERRYVIEW(dev_priv))
5601 chv_crtc_clock_get(crtc, pipe_config);
5602 else if (IS_VALLEYVIEW(dev_priv))
5603 vlv_crtc_clock_get(crtc, pipe_config);
5605 i9xx_crtc_clock_get(crtc, pipe_config);
5608 * Normally the dotclock is filled in by the encoder .get_config()
5609 * but in case the pipe is enabled w/o any ports we need a sane
5612 pipe_config->hw.adjusted_mode.crtc_clock =
5613 pipe_config->port_clock / pipe_config->pixel_multiplier;
5618 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs.  First scans the encoders to learn what panel
 * outputs exist (LVDS, CPU eDP) and whether a CK505 clock chip / SSC is
 * available, then computes the desired final register value and walks the
 * hardware toward it one source at a time, with posting reads between
 * steps (the sources must be sequenced carefully per the ILK docs).
 * Ends with BUG_ON(val != final) to assert the walk converged.
 * NOTE(review): many lines (breaks, else branches, braces) are elided in
 * this extract; code kept byte-identical.
 */
5623 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
5625 struct intel_encoder *encoder;
5628 bool has_lvds = false;
5629 bool has_cpu_edp = false;
5630 bool has_panel = false;
5631 bool has_ck505 = false;
5632 bool can_ssc = false;
5633 bool using_ssc_source = false;
5635 /* We need to take the global config into account */
5636 for_each_intel_encoder(&dev_priv->drm, encoder) {
5637 switch (encoder->type) {
5638 case INTEL_OUTPUT_LVDS:
5642 case INTEL_OUTPUT_EDP:
5644 if (encoder->port == PORT_A)
5652 if (HAS_PCH_IBX(dev_priv)) {
5653 has_ck505 = dev_priv->vbt.display_clock_mode;
5654 can_ssc = has_ck505;
5660 /* Check if any DPLLs are using the SSC source */
5661 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
5662 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
5664 if (!(temp & DPLL_VCO_ENABLE))
5667 if ((temp & PLL_REF_INPUT_MASK) ==
5668 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5669 using_ssc_source = true;
5674 drm_dbg_kms(&dev_priv->drm,
5675 "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
5676 has_panel, has_lvds, has_ck505, using_ssc_source);
5678 /* Ironlake: try to setup display ref clock before DPLL
5679 * enabling. This is only under driver's control after
5680 * PCH B stepping, previous chipset stepping should be
5681 * ignoring this setting.
5683 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
5685 /* As we must carefully and slowly disable/enable each source in turn,
5686 * compute the final state we want first and check if we need to
5687 * make any changes at all.
5690 final &= ~DREF_NONSPREAD_SOURCE_MASK;
5692 final |= DREF_NONSPREAD_CK505_ENABLE;
5694 final |= DREF_NONSPREAD_SOURCE_ENABLE;
5696 final &= ~DREF_SSC_SOURCE_MASK;
5697 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5698 final &= ~DREF_SSC1_ENABLE;
5701 final |= DREF_SSC_SOURCE_ENABLE;
5703 if (intel_panel_use_ssc(dev_priv) && can_ssc)
5704 final |= DREF_SSC1_ENABLE;
5707 if (intel_panel_use_ssc(dev_priv) && can_ssc)
5708 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5710 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5712 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5713 } else if (using_ssc_source) {
5714 final |= DREF_SSC_SOURCE_ENABLE;
5715 final |= DREF_SSC1_ENABLE;
5721 /* Always enable nonspread source */
5722 val &= ~DREF_NONSPREAD_SOURCE_MASK;
5725 val |= DREF_NONSPREAD_CK505_ENABLE;
5727 val |= DREF_NONSPREAD_SOURCE_ENABLE;
5730 val &= ~DREF_SSC_SOURCE_MASK;
5731 val |= DREF_SSC_SOURCE_ENABLE;
5733 /* SSC must be turned on before enabling the CPU output */
5734 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5735 drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
5736 val |= DREF_SSC1_ENABLE;
5738 val &= ~DREF_SSC1_ENABLE;
5740 /* Get SSC going before enabling the outputs */
5741 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5742 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5745 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5747 /* Enable CPU source on CPU attached eDP */
5749 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5750 drm_dbg_kms(&dev_priv->drm,
5751 "Using SSC on eDP\n");
5752 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5754 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5756 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5758 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5759 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5762 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
5764 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5766 /* Turn off CPU output */
5767 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5769 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5770 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5773 if (!using_ssc_source) {
5774 drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
5776 /* Turn off the SSC source */
5777 val &= ~DREF_SSC_SOURCE_MASK;
5778 val |= DREF_SSC_SOURCE_DISABLE;
5781 val &= ~DREF_SSC1_ENABLE;
5783 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5784 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5789 BUG_ON(val != final);
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the IOSFSB reset
 * control bit, wait (up to 100us) for the status bit to latch, then
 * de-assert and wait for the status to clear.  Timeouts are only logged.
 */
5792 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5796 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5797 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5798 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5800 if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5801 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5802 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5804 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5805 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5806 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5808 if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5809 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5810 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5813 /* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband (SBI) interface,
 * per the WaMPhyProgramming workaround for Haswell.  Each step is a
 * read-modify-write of a raw mPHY register offset; the magic offsets and
 * values come from the workaround spec, so they are deliberately not
 * symbolic.  Some modify lines are elided in this extract.
 */
5814 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5818 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5819 tmp &= ~(0xFF << 24);
5820 tmp |= (0x12 << 24);
5821 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5823 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5825 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5827 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5829 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5831 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5832 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5833 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5835 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5836 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5837 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5839 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5842 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5844 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5847 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5849 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5852 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5854 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5857 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5859 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5860 tmp &= ~(0xFF << 16);
5861 tmp |= (0x1C << 16);
5862 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5864 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5865 tmp &= ~(0xFF << 16);
5866 tmp |= (0x1C << 16);
5867 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5869 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5871 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5873 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5875 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5877 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5878 tmp &= ~(0xF << 28);
5880 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5882 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5883 tmp &= ~(0xF << 28);
5885 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5888 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5889 * Programming" based on the parameters passed:
5890 * - Sequence to enable CLKOUT_DP
5891 * - Sequence to enable CLKOUT_DP without spread
5892 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
/*
 * All SBI accesses are done under sb_lock.  FDI requires downspread and
 * is not available on LP PCH, hence the two drm_WARN guards up front.
 */
5894 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5895 bool with_spread, bool with_fdi)
5899 if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5900 "FDI requires downspread\n"))
5902 if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5903 with_fdi, "LP PCH doesn't have FDI\n"))
5906 mutex_lock(&dev_priv->sb_lock);
5908 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5909 tmp &= ~SBI_SSCCTL_DISABLE;
5910 tmp |= SBI_SSCCTL_PATHALT;
5911 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5916 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5917 tmp &= ~SBI_SSCCTL_PATHALT;
5918 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5921 lpt_reset_fdi_mphy(dev_priv);
5922 lpt_program_fdi_mphy(dev_priv);
5926 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5927 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5928 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5929 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5931 mutex_unlock(&dev_priv->sb_lock);
5934 /* Sequence to disable CLKOUT_DP */
/*
 * Reverse of lpt_enable_clkout_dp(): clear the buffer-enable override,
 * then (if the SSC controller is running) route through PATHALT before
 * setting the disable bit.  All SBI traffic is under sb_lock.
 */
5935 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5939 mutex_lock(&dev_priv->sb_lock);
5941 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5942 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5943 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5944 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5946 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5947 if (!(tmp & SBI_SSCCTL_DISABLE)) {
5948 if (!(tmp & SBI_SSCCTL_PATHALT)) {
5949 tmp |= SBI_SSCCTL_PATHALT;
5950 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5953 tmp |= SBI_SSCCTL_DISABLE;
5954 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5957 mutex_unlock(&dev_priv->sb_lock);
/*
 * Map a clock "bend" step count (-50..+50 in steps of 5) to an array
 * index 0..20; the table below holds the SSCDIVINTPHASE register value
 * for each bend amount (see lpt_bend_clkout_dp()).
 */
5960 #define BEND_IDX(steps) ((50 + (steps)) / 5)
5962 static const u16 sscdivintphase[] = {
5963 [BEND_IDX( 50)] = 0x3B23,
5964 [BEND_IDX( 45)] = 0x3B23,
5965 [BEND_IDX( 40)] = 0x3C23,
5966 [BEND_IDX( 35)] = 0x3C23,
5967 [BEND_IDX( 30)] = 0x3D23,
5968 [BEND_IDX( 25)] = 0x3D23,
5969 [BEND_IDX( 20)] = 0x3E23,
5970 [BEND_IDX( 15)] = 0x3E23,
5971 [BEND_IDX( 10)] = 0x3F23,
5972 [BEND_IDX( 5)] = 0x3F23,
5973 [BEND_IDX( 0)] = 0x0025,
5974 [BEND_IDX( -5)] = 0x0025,
5975 [BEND_IDX(-10)] = 0x0125,
5976 [BEND_IDX(-15)] = 0x0125,
5977 [BEND_IDX(-20)] = 0x0225,
5978 [BEND_IDX(-25)] = 0x0225,
5979 [BEND_IDX(-30)] = 0x0325,
5980 [BEND_IDX(-35)] = 0x0325,
5981 [BEND_IDX(-40)] = 0x0425,
5982 [BEND_IDX(-45)] = 0x0425,
5983 [BEND_IDX(-50)] = 0x0525,
/*
 * Apply a small frequency "bend" to CLKOUT_DP via the SBI
 * SSCDITHPHASE/SSCDIVINTPHASE registers, using the table above.
 */
5988 * steps -50 to 50 inclusive, in steps of 5
5989 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5990 * change in clock period = -(steps / 10) * 5.787 ps
5992 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5995 int idx = BEND_IDX(steps);
/* Reject step values that aren't multiples of 5 or fall outside the table. */
5997 if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
6000 if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
6003 mutex_lock(&dev_priv->sb_lock);
6005 if (steps % 10 != 0)
6009 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
6011 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
6013 tmp |= sscdivintphase[idx];
6014 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
6016 mutex_unlock(&dev_priv->sb_lock);
/*
 * Report whether the SPLL is currently enabled and sourcing its reference
 * from the PCH SSC clock (directly on BDW, or via the mux when the CPU
 * SSC fuse is disabled).
 */
6021 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
6023 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
6024 u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
6026 if ((ctl & SPLL_PLL_ENABLE) == 0)
6029 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
6030 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
6033 if (IS_BROADWELL(dev_priv) &&
6034 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
/*
 * Same check as spll_uses_pch_ssc(), but for the WRPLL identified by @id:
 * true when the PLL is enabled and referenced from the PCH SSC clock.
 */
6040 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
6041 enum intel_dpll_id id)
6043 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
6044 u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
6046 if ((ctl & WRPLL_PLL_ENABLE) == 0)
6049 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
6052 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
6053 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
6054 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
/*
 * LPT PCH reference clock init: record which PLLs the BIOS left running
 * on the PCH SSC reference (in dev_priv->pch_ssc_use), then either enable
 * CLKOUT_DP (with spread, and FDI config when an analog encoder exists)
 * or disable it when unused.
 */
6060 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
6062 struct intel_encoder *encoder;
6063 bool has_fdi = false;
6065 for_each_intel_encoder(&dev_priv->drm, encoder) {
6066 switch (encoder->type) {
6067 case INTEL_OUTPUT_ANALOG:
6076 * The BIOS may have decided to use the PCH SSC
6077 * reference so we must not disable it until the
6078 * relevant PLLs have stopped relying on it. We'll
6079 * just leave the PCH SSC reference enabled in case
6080 * any active PLL is using it. It will get disabled
6081 * after runtime suspend if we don't have FDI.
6083 * TODO: Move the whole reference clock handling
6084 * to the modeset sequence proper so that we can
6085 * actually enable/disable/reconfigure these things
6086 * safely. To do that we need to introduce a real
6087 * clock hierarchy. That would also allow us to do
6088 * clock bending finally.
6090 dev_priv->pch_ssc_use = 0;
6092 if (spll_uses_pch_ssc(dev_priv)) {
6093 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
6094 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
6097 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
6098 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
6099 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
6102 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
6103 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
6104 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
6107 if (dev_priv->pch_ssc_use)
6111 lpt_bend_clkout_dp(dev_priv, 0);
6112 lpt_enable_clkout_dp(dev_priv, true, true);
6114 lpt_disable_clkout_dp(dev_priv);
/*
 * Dispatch PCH reference clock initialization to the platform-specific
 * routine: ILK path for IBX/CPT PCHs, LPT path for LPT PCHs.
 */
6119 * Initialize reference clocks when the driver loads
6121 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
6123 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
6124 ilk_init_pch_refclk(dev_priv);
6125 else if (HAS_PCH_LPT(dev_priv))
6126 lpt_init_pch_refclk(dev_priv);
/*
 * Program PIPECONF for an ILK-class pipe from the committed crtc state:
 * bpc, dithering, interlace mode, color range, output colorspace, gamma
 * mode and frame-start delay.  Finishes with a posting read.
 * NOTE(review): some case/break lines are elided in this extract.
 */
6129 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
6131 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6132 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6133 enum pipe pipe = crtc->pipe;
6138 switch (crtc_state->pipe_bpp) {
6140 val |= PIPECONF_6BPC;
6143 val |= PIPECONF_8BPC;
6146 val |= PIPECONF_10BPC;
6149 val |= PIPECONF_12BPC;
6152 /* Case prevented by intel_choose_pipe_bpp_dither. */
6156 if (crtc_state->dither)
6157 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6159 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6160 val |= PIPECONF_INTERLACED_ILK;
6162 val |= PIPECONF_PROGRESSIVE;
6165 * This would end up with an odd purple hue over
6166 * the entire display. Make sure we don't do it.
6168 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
6169 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
6171 if (crtc_state->limited_color_range &&
6172 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
6173 val |= PIPECONF_COLOR_RANGE_SELECT;
6175 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
6176 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
6178 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
6180 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
6182 intel_de_write(dev_priv, PIPECONF(pipe), val);
6183 intel_de_posting_read(dev_priv, PIPECONF(pipe));
/*
 * Program PIPECONF for a HSW+ transcoder: dithering and YUV output
 * colorspace are HSW-only bits here; interlace vs. progressive is common.
 */
6186 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
6188 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6189 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6190 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
6193 if (IS_HASWELL(dev_priv) && crtc_state->dither)
6194 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6196 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6197 val |= PIPECONF_INTERLACED_ILK;
6199 val |= PIPECONF_PROGRESSIVE;
6201 if (IS_HASWELL(dev_priv) &&
6202 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
6203 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
6205 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
6206 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
/*
 * Program PIPEMISC for BDW+ from the crtc state: dither bpc/type, output
 * colorspace (YUV 4:4:4/4:2:0), gen11+ HDR-precision mode when only HDR
 * planes (plus cursor) are active, and gen12+ pixel rounding.
 */
6209 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
6211 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6215 switch (crtc_state->pipe_bpp) {
6217 val |= PIPEMISC_DITHER_6_BPC;
6220 val |= PIPEMISC_DITHER_8_BPC;
6223 val |= PIPEMISC_DITHER_10_BPC;
6226 val |= PIPEMISC_DITHER_12_BPC;
6229 MISSING_CASE(crtc_state->pipe_bpp);
6233 if (crtc_state->dither)
6234 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6236 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6237 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
6238 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
6240 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
6241 val |= PIPEMISC_YUV420_ENABLE |
6242 PIPEMISC_YUV420_MODE_FULL_BLEND;
6244 if (INTEL_GEN(dev_priv) >= 11 &&
6245 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
6246 BIT(PLANE_CURSOR))) == 0)
6247 val |= PIPEMISC_HDR_MODE_PRECISION;
6249 if (INTEL_GEN(dev_priv) >= 12)
6250 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
6252 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
/*
 * Read back the pipe bpp implied by the PIPEMISC dither-bpc field.
 * NOTE(review): the per-case return statements and default are elided in
 * this extract; code kept byte-identical.
 */
6255 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
6257 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6260 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
6262 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
6263 case PIPEMISC_DITHER_6_BPC:
6265 case PIPEMISC_DITHER_8_BPC:
6267 case PIPEMISC_DITHER_10_BPC:
6269 case PIPEMISC_DITHER_12_BPC:
/*
 * Compute the number of FDI lanes needed for @target_clock (kHz) at @bpp
 * bits per pixel over a link running at @link_bw, padding the required
 * bandwidth by 5% to leave headroom for spread spectrum.
 */
6277 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
6280 * Account for spread spectrum to avoid
6281 * oversubscribing the link. Max center spread
6282 * is 2.5%; use 5% for safety's sake.
6284 u32 bps = target_clock * bpp * 21 / 20;
6285 return DIV_ROUND_UP(bps, link_bw * 8);
/*
 * Read the PCH transcoder's link/data M/N values (and TU size) for @pipe
 * into @m_n.
 */
6288 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
6289 struct intel_link_m_n *m_n)
6291 struct drm_device *dev = crtc->base.dev;
6292 struct drm_i915_private *dev_priv = to_i915(dev);
6293 enum pipe pipe = crtc->pipe;
6295 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
6296 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
6297 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
6299 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
6300 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
6301 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/*
 * Read the CPU transcoder's M1/N1 link and data values into @m_n, and --
 * when @m2_n2 is provided and the transcoder supports a second set --
 * the M2/N2 values too.  Pre-gen5 platforms use the G4X per-pipe
 * registers instead (final branch; its "else" line is elided here).
 */
6304 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
6305 enum transcoder transcoder,
6306 struct intel_link_m_n *m_n,
6307 struct intel_link_m_n *m2_n2)
6309 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6310 enum pipe pipe = crtc->pipe;
6312 if (INTEL_GEN(dev_priv) >= 5) {
6313 m_n->link_m = intel_de_read(dev_priv,
6314 PIPE_LINK_M1(transcoder));
6315 m_n->link_n = intel_de_read(dev_priv,
6316 PIPE_LINK_N1(transcoder));
6317 m_n->gmch_m = intel_de_read(dev_priv,
6318 PIPE_DATA_M1(transcoder))
6320 m_n->gmch_n = intel_de_read(dev_priv,
6321 PIPE_DATA_N1(transcoder));
6322 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
6323 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6325 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
6326 m2_n2->link_m = intel_de_read(dev_priv,
6327 PIPE_LINK_M2(transcoder));
6328 m2_n2->link_n = intel_de_read(dev_priv,
6329 PIPE_LINK_N2(transcoder));
6330 m2_n2->gmch_m = intel_de_read(dev_priv,
6331 PIPE_DATA_M2(transcoder))
6333 m2_n2->gmch_n = intel_de_read(dev_priv,
6334 PIPE_DATA_N2(transcoder));
6335 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
6336 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6339 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
6340 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
6341 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
6343 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
6344 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
6345 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/*
 * Read the DP link M/N configuration: from the PCH transcoder when the
 * pipe drives a PCH encoder, otherwise from the CPU transcoder (including
 * the second M2/N2 set).
 */
6349 void intel_dp_get_m_n(struct intel_crtc *crtc,
6350 struct intel_crtc_state *pipe_config)
6352 if (pipe_config->has_pch_encoder)
6353 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6355 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6356 &pipe_config->dp_m_n,
6357 &pipe_config->dp_m2_n2);
/* Read the FDI M/N values from the CPU transcoder (no M2/N2 set). */
6360 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
6361 struct intel_crtc_state *pipe_config)
6363 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6364 &pipe_config->fdi_m_n, NULL);
/*
 * Decode panel fitter window registers into the pch_pfit destination
 * rect: x/width in the high 16 bits, y/height in the low 16 bits.
 */
6367 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
6370 drm_rect_init(&crtc_state->pch_pfit.dst,
6371 pos >> 16, pos & 0xffff,
6372 size >> 16, size & 0xffff);
/*
 * SKL+ pfit readout: scan this pipe's scalers for one enabled in
 * pipe-scaling mode (PS_SCALER_EN set, no plane selected), record its
 * window pos/size and mark it in use; update scaler_users accordingly.
 */
6375 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
6377 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6378 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6379 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
6383 /* find scaler attached to this pipe */
6384 for (i = 0; i < crtc->num_scalers; i++) {
6387 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
6388 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
6392 crtc_state->pch_pfit.enabled = true;
6394 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
6395 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
6397 ilk_get_pfit_pos_size(crtc_state, pos, size);
6399 scaler_state->scalers[i].in_use = true;
6403 scaler_state->scaler_id = id;
6405 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
6407 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
/*
 * ILK-class pfit readout: if PF_CTL says the fitter is enabled, record
 * its window, and on gen7 warn if the fitter isn't bound to this pipe
 * (we never reassign fitters on ivb/hsw).
 */
6410 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
6412 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6413 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6416 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
6417 if ((ctl & PF_ENABLE) == 0)
6420 crtc_state->pch_pfit.enabled = true;
6422 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
6423 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
6425 ilk_get_pfit_pos_size(crtc_state, pos, size);
6428 * We currently do not free assignements of panel fitters on
6429 * ivb/hsw (since we don't use the higher upscaling modes which
6430 * differentiates them) so just WARN about this case for now.
6432 drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
6433 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
/*
 * Read out the current hardware pipe state on ILK-class platforms into
 * @pipe_config: PIPECONF (bpc, color range, colorspace, gamma), color
 * management, and -- when the matching PCH transcoder is enabled -- the
 * FDI configuration, shared DPLL identity/state and pixel multiplier.
 * Takes a pipe power-domain wakeref; returns whether the pipe is active.
 * NOTE(review): several case/break/brace lines are elided in this
 * extract; code kept byte-identical.
 */
6436 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
6437 struct intel_crtc_state *pipe_config)
6439 struct drm_device *dev = crtc->base.dev;
6440 struct drm_i915_private *dev_priv = to_i915(dev);
6441 enum intel_display_power_domain power_domain;
6442 intel_wakeref_t wakeref;
6446 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
6447 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
6451 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6452 pipe_config->shared_dpll = NULL;
6455 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
6456 if (!(tmp & PIPECONF_ENABLE))
6459 switch (tmp & PIPECONF_BPC_MASK) {
6461 pipe_config->pipe_bpp = 18;
6464 pipe_config->pipe_bpp = 24;
6466 case PIPECONF_10BPC:
6467 pipe_config->pipe_bpp = 30;
6469 case PIPECONF_12BPC:
6470 pipe_config->pipe_bpp = 36;
6476 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
6477 pipe_config->limited_color_range = true;
6479 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
6480 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
6481 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
6482 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6485 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6489 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
6490 PIPECONF_GAMMA_MODE_SHIFT;
6492 pipe_config->csc_mode = intel_de_read(dev_priv,
6493 PIPE_CSC_MODE(crtc->pipe));
6495 i9xx_get_pipe_color_config(pipe_config);
6496 intel_color_get_config(pipe_config);
6498 if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6499 struct intel_shared_dpll *pll;
6500 enum intel_dpll_id pll_id;
6503 pipe_config->has_pch_encoder = true;
6505 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
6506 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6507 FDI_DP_PORT_WIDTH_SHIFT) + 1;
6509 ilk_get_fdi_m_n_config(crtc, pipe_config);
6511 if (HAS_PCH_IBX(dev_priv)) {
6513 * The pipe->pch transcoder and pch transcoder->pll
6516 pll_id = (enum intel_dpll_id) crtc->pipe;
6518 tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
6519 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6520 pll_id = DPLL_ID_PCH_PLL_B;
6522 pll_id= DPLL_ID_PCH_PLL_A;
6525 pipe_config->shared_dpll =
6526 intel_get_shared_dpll_by_id(dev_priv, pll_id);
6527 pll = pipe_config->shared_dpll;
6529 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6530 &pipe_config->dpll_hw_state);
6531 drm_WARN_ON(dev, !pll_active);
6533 tmp = pipe_config->dpll_hw_state.dpll;
6534 pipe_config->pixel_multiplier =
6535 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6536 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6538 ilk_pch_clock_get(crtc, pipe_config);
6540 pipe_config->pixel_multiplier = 1;
6543 intel_get_transcoder_timings(crtc, pipe_config);
6544 intel_get_pipe_src_size(crtc, pipe_config);
6546 ilk_get_pfit_config(pipe_config);
6551 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Determine which CPU transcoder feeds @crtc on HSW+.  The pipe->
 * transcoder mapping is 1:1 except for the eDP (and, on gen11+, DSI)
 * transcoders, whose TRANS_DDI_FUNC_CTL input field is decoded here to
 * find the attached pipe.  Grabs the transcoder power domain into
 * @power_domain_set and returns whether PIPECONF reports the transcoder
 * enabled.  Some break/brace lines are elided in this extract.
 */
6556 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
6557 struct intel_crtc_state *pipe_config,
6558 struct intel_display_power_domain_set *power_domain_set)
6560 struct drm_device *dev = crtc->base.dev;
6561 struct drm_i915_private *dev_priv = to_i915(dev);
6562 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
6563 unsigned long enabled_panel_transcoders = 0;
6564 enum transcoder panel_transcoder;
6567 if (INTEL_GEN(dev_priv) >= 11)
6568 panel_transcoder_mask |=
6569 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
6572 * The pipe->transcoder mapping is fixed with the exception of the eDP
6573 * and DSI transcoders handled below.
6575 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6578 * XXX: Do intel_display_power_get_if_enabled before reading this (for
6579 * consistency and less surprising code; it's in always on power).
6581 for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
6582 panel_transcoder_mask) {
6583 bool force_thru = false;
6584 enum pipe trans_pipe;
6586 tmp = intel_de_read(dev_priv,
6587 TRANS_DDI_FUNC_CTL(panel_transcoder));
6588 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6592 * Log all enabled ones, only use the first one.
6594 * FIXME: This won't work for two separate DSI displays.
6596 enabled_panel_transcoders |= BIT(panel_transcoder);
6597 if (enabled_panel_transcoders != BIT(panel_transcoder))
6600 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6603 "unknown pipe linked to transcoder %s\n",
6604 transcoder_name(panel_transcoder));
6606 case TRANS_DDI_EDP_INPUT_A_ONOFF:
6609 case TRANS_DDI_EDP_INPUT_A_ON:
6610 trans_pipe = PIPE_A;
6612 case TRANS_DDI_EDP_INPUT_B_ONOFF:
6613 trans_pipe = PIPE_B;
6615 case TRANS_DDI_EDP_INPUT_C_ONOFF:
6616 trans_pipe = PIPE_C;
6618 case TRANS_DDI_EDP_INPUT_D_ONOFF:
6619 trans_pipe = PIPE_D;
6623 if (trans_pipe == crtc->pipe) {
6624 pipe_config->cpu_transcoder = panel_transcoder;
6625 pipe_config->pch_pfit.force_thru = force_thru;
6630 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
6632 drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
6633 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
6635 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6636 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6639 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
6641 return tmp & PIPECONF_ENABLE;
/*
 * BXT DSI readout: for each DSI port (A maps to transcoder DSI_A, C to
 * DSI_C) grab the transcoder power domain, verify the DSI PLL is running
 * (register access would hang otherwise), and check DPI_ENABLE plus the
 * MIPI_CTRL pipe-select field against @crtc.  Returns true when a DSI
 * transcoder was found driving this pipe.
 */
6644 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
6645 struct intel_crtc_state *pipe_config,
6646 struct intel_display_power_domain_set *power_domain_set)
6648 struct drm_device *dev = crtc->base.dev;
6649 struct drm_i915_private *dev_priv = to_i915(dev);
6650 enum transcoder cpu_transcoder;
6654 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
6656 cpu_transcoder = TRANSCODER_DSI_A;
6658 cpu_transcoder = TRANSCODER_DSI_C;
6660 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6661 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
6665 * The PLL needs to be enabled with a valid divider
6666 * configuration, otherwise accessing DSI registers will hang
6667 * the machine. See BSpec North Display Engine
6668 * registers/MIPI[BXT]. We can break out here early, since we
6669 * need the same DSI PLL to be enabled for both DSI ports.
6671 if (!bxt_dsi_pll_is_enabled(dev_priv))
6674 /* XXX: this works for video mode only */
6675 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
6676 if (!(tmp & DPI_ENABLE))
6679 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
6680 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
6683 pipe_config->cpu_transcoder = cpu_transcoder;
6687 return transcoder_is_dsi(pipe_config->cpu_transcoder);
/*
 * Work out which DDI port drives this transcoder (from
 * TRANS_DDI_FUNC_CTL, or fixed mapping for DSI transcoders) and, on
 * pre-gen9 when the port is DDI E with the PCH transcoder running, read
 * out the FDI configuration as well.
 */
6690 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
6691 struct intel_crtc_state *pipe_config)
6693 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6694 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6698 if (transcoder_is_dsi(cpu_transcoder)) {
6699 port = (cpu_transcoder == TRANSCODER_DSI_A) ?
6702 tmp = intel_de_read(dev_priv,
6703 TRANS_DDI_FUNC_CTL(cpu_transcoder));
6704 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6706 if (INTEL_GEN(dev_priv) >= 12)
6707 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6709 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6713 * Haswell has only FDI/PCH transcoder A. It is which is connected to
6714 * DDI E. So just check whether this pipe is wired to DDI E and whether
6715 * the PCH transcoder is on.
6717 if (INTEL_GEN(dev_priv) < 9 &&
6718 (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
6719 pipe_config->has_pch_encoder = true;
6721 tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
6722 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6723 FDI_DP_PORT_WIDTH_SHIFT) + 1;
6725 ilk_get_fdi_m_n_config(crtc, pipe_config);
/*
 * Full hardware state readout for a HSW+ pipe: transcoder, timings, color
 * management, panel fitter, IPS, pixel multiplier. Power domains are taken
 * only if already enabled and released at the end; returns whether the pipe
 * is active (return statement not visible in this extraction).
 */
6729 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
6730 struct intel_crtc_state *pipe_config)
6732 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6733 struct intel_display_power_domain_set power_domain_set = { };
6737 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6738 POWER_DOMAIN_PIPE(crtc->pipe)))
6741 pipe_config->shared_dpll = NULL;
6743 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
/* A DSI transcoder must not coexist with an enabled DDI transcoder. */
6745 if (IS_GEN9_LP(dev_priv) &&
6746 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
6747 drm_WARN_ON(&dev_priv->drm, active);
6751 intel_dsc_get_config(pipe_config);
6754 /* bigjoiner slave doesn't enable transcoder */
6755 if (!pipe_config->bigjoiner_slave)
6759 pipe_config->pixel_multiplier = 1;
6761 /* we cannot read out most state, so don't bother.. */
6762 pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
6763 } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
6764 INTEL_GEN(dev_priv) >= 11) {
6765 hsw_get_ddi_port_state(crtc, pipe_config);
6766 intel_get_transcoder_timings(crtc, pipe_config);
6769 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
6770 intel_vrr_get_config(crtc, pipe_config);
6772 intel_get_pipe_src_size(crtc, pipe_config);
/* Output colorspace: HSW reads PIPECONF, BDW+ uses PIPEMISC. */
6774 if (IS_HASWELL(dev_priv)) {
6775 u32 tmp = intel_de_read(dev_priv,
6776 PIPECONF(pipe_config->cpu_transcoder));
6778 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
6779 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6781 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6783 pipe_config->output_format =
6784 bdw_get_pipemisc_output_format(crtc);
6787 pipe_config->gamma_mode = intel_de_read(dev_priv,
6788 GAMMA_MODE(crtc->pipe));
6790 pipe_config->csc_mode = intel_de_read(dev_priv,
6791 PIPE_CSC_MODE(crtc->pipe));
/* Gen9+: gamma/CSC enables live in SKL_BOTTOM_COLOR. */
6793 if (INTEL_GEN(dev_priv) >= 9) {
6794 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6796 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6797 pipe_config->gamma_enable = true;
6799 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6800 pipe_config->csc_enable = true;
6802 i9xx_get_pipe_color_config(pipe_config);
6805 intel_color_get_config(pipe_config);
6807 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6808 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6809 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6810 pipe_config->ips_linetime =
6811 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
/* Panel fitter readout needs its own power domain. */
6813 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6814 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6815 if (INTEL_GEN(dev_priv) >= 9)
6816 skl_get_pfit_config(pipe_config);
6818 ilk_get_pfit_config(pipe_config);
6821 if (hsw_crtc_supports_ips(crtc)) {
6822 if (IS_HASWELL(dev_priv))
6823 pipe_config->ips_enabled = intel_de_read(dev_priv,
6824 IPS_CTL) & IPS_ENABLE;
6827 * We cannot readout IPS state on broadwell, set to
6828 * true so we can set it to a defined state on first
6831 pipe_config->ips_enabled = true;
6835 if (pipe_config->bigjoiner_slave) {
6836 /* Cannot be read out as a slave, set to 0. */
6837 pipe_config->pixel_multiplier = 0;
6838 } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
6839 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6840 pipe_config->pixel_multiplier =
6841 intel_de_read(dev_priv,
6842 PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6844 pipe_config->pixel_multiplier = 1;
/* Release every power domain reference taken during readout. */
6848 intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
/*
 * Platform-dispatched pipe readout wrapper: fills crtc_state from hardware,
 * marks it active and derives the remaining software-only state.
 */
6853 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6855 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6856 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6858 if (!i915->display.get_pipe_config(crtc, crtc_state))
6861 crtc_state->hw.active = true;
6863 intel_crtc_readout_derived_state(crtc_state);
6868 /* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by intel_get_load_detect_pipe() to light up a crtc. */
6869 static const struct drm_display_mode load_detect_mode = {
6870 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6871 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 * Returns the embedded drm_framebuffer, or an ERR_PTR on allocation or
 * init failure (the kfree on the error path is not visible in this view).
 */
6874 struct drm_framebuffer *
6875 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6876 struct drm_mode_fb_cmd2 *mode_cmd)
6878 struct intel_framebuffer *intel_fb;
6881 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6883 return ERR_PTR(-ENOMEM);
6885 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6889 return &intel_fb->base;
6893 return ERR_PTR(ret);
/*
 * Add all planes on @crtc to @state and detach them (crtc = NULL, fb = NULL)
 * so a subsequent commit turns them off. Returns 0 or a negative errno.
 */
6896 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6897 struct drm_crtc *crtc)
6899 struct drm_plane *plane;
6900 struct drm_plane_state *plane_state;
6903 ret = drm_atomic_add_affected_planes(state, crtc);
6907 for_each_new_plane_in_state(state, plane, plane_state, i) {
6908 if (plane_state->crtc != crtc)
6911 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6915 drm_atomic_set_fb_for_plane(plane_state, NULL);
/*
 * Light up a pipe for connector load detection: reuse the connector's
 * current crtc or grab an unused one, commit load_detect_mode on it with
 * all planes disabled, and stash a duplicated state in @old->restore_state
 * so intel_release_load_detect_pipe() can undo everything.
 * NOTE(review): several error/retry paths (-EDEADLK backoff, goto labels)
 * are missing from this extraction.
 */
6921 int intel_get_load_detect_pipe(struct drm_connector *connector,
6922 struct intel_load_detect_pipe *old,
6923 struct drm_modeset_acquire_ctx *ctx)
6925 struct intel_crtc *intel_crtc;
6926 struct intel_encoder *intel_encoder =
6927 intel_attached_encoder(to_intel_connector(connector));
6928 struct drm_crtc *possible_crtc;
6929 struct drm_encoder *encoder = &intel_encoder->base;
6930 struct drm_crtc *crtc = NULL;
6931 struct drm_device *dev = encoder->dev;
6932 struct drm_i915_private *dev_priv = to_i915(dev);
6933 struct drm_mode_config *config = &dev->mode_config;
6934 struct drm_atomic_state *state = NULL, *restore_state = NULL;
6935 struct drm_connector_state *connector_state;
6936 struct intel_crtc_state *crtc_state;
6939 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6940 connector->base.id, connector->name,
6941 encoder->base.id, encoder->name);
6943 old->restore_state = NULL;
6945 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6948 * Algorithm gets a little messy:
6950 * - if the connector already has an assigned crtc, use it (but make
6951 * sure it's on first)
6953 * - try to find the first unused crtc that can drive this connector,
6954 * and use that if we find one
6957 /* See if we already have a CRTC for this connector */
6958 if (connector->state->crtc) {
6959 crtc = connector->state->crtc;
6961 ret = drm_modeset_lock(&crtc->mutex, ctx);
6965 /* Make sure the crtc and connector are running */
6969 /* Find an unused one (if possible) */
6970 for_each_crtc(dev, possible_crtc) {
/* Skip crtcs the encoder cannot drive. */
6972 if (!(encoder->possible_crtcs & (1 << i)))
6975 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
6979 if (possible_crtc->state->enable) {
6980 drm_modeset_unlock(&possible_crtc->mutex);
6984 crtc = possible_crtc;
6989 * If we didn't find an unused CRTC, don't use any.
6992 drm_dbg_kms(&dev_priv->drm,
6993 "no pipe available for load-detect\n");
6999 intel_crtc = to_intel_crtc(crtc);
/* Build two states: one to commit now, one to restore later. */
7001 state = drm_atomic_state_alloc(dev);
7002 restore_state = drm_atomic_state_alloc(dev);
7003 if (!state || !restore_state) {
7008 state->acquire_ctx = ctx;
7009 restore_state->acquire_ctx = ctx;
7011 connector_state = drm_atomic_get_connector_state(state, connector);
7012 if (IS_ERR(connector_state)) {
7013 ret = PTR_ERR(connector_state);
7017 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
7021 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
7022 if (IS_ERR(crtc_state)) {
7023 ret = PTR_ERR(crtc_state);
7027 crtc_state->uapi.active = true;
7029 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
7034 ret = intel_modeset_disable_planes(state, crtc);
/* Duplicate the current connector/crtc/plane state for later restore. */
7038 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
7040 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
7042 ret = drm_atomic_add_affected_planes(restore_state, crtc);
7044 drm_dbg_kms(&dev_priv->drm,
7045 "Failed to create a copy of old state to restore: %i\n",
7050 ret = drm_atomic_commit(state);
7052 drm_dbg_kms(&dev_priv->drm,
7053 "failed to set mode on load-detect pipe\n");
7057 old->restore_state = restore_state;
7058 drm_atomic_state_put(state);
7060 /* let the connector get through one full cycle before testing */
7061 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
/* Error path: drop both states before returning. */
7066 drm_atomic_state_put(state);
7069 if (restore_state) {
7070 drm_atomic_state_put(restore_state);
7071 restore_state = NULL;
7074 if (ret == -EDEADLK)
/*
 * Undo intel_get_load_detect_pipe(): commit the duplicated restore_state
 * saved in @old and drop the reference on it.
 */
7080 void intel_release_load_detect_pipe(struct drm_connector *connector,
7081 struct intel_load_detect_pipe *old,
7082 struct drm_modeset_acquire_ctx *ctx)
7084 struct intel_encoder *intel_encoder =
7085 intel_attached_encoder(to_intel_connector(connector));
7086 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
7087 struct drm_encoder *encoder = &intel_encoder->base;
7088 struct drm_atomic_state *state = old->restore_state;
7091 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7092 connector->base.id, connector->name,
7093 encoder->base.id, encoder->name);
7098 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
/* Restore failures are logged but not propagated to the caller. */
7100 drm_dbg_kms(&i915->drm,
7101 "Couldn't release load detect pipe: %i\n", ret);
7102 drm_atomic_state_put(state);
/*
 * Return the reference clock (kHz) feeding the pipe's DPLL, chosen from
 * the programmed DPLL reference-input field and platform type. The
 * literal fallback values are on lines not captured in this extraction.
 */
7105 static int i9xx_pll_refclk(struct drm_device *dev,
7106 const struct intel_crtc_state *pipe_config)
7108 struct drm_i915_private *dev_priv = to_i915(dev);
7109 u32 dpll = pipe_config->dpll_hw_state.dpll;
/* SSC reference comes from the VBT-provided LVDS SSC frequency. */
7111 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7112 return dev_priv->vbt.lvds_ssc_freq;
7113 else if (HAS_PCH_SPLIT(dev_priv))
7115 else if (!IS_GEN(dev_priv, 2))
7121 /* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Decode FP0/FP1 and DPLL register values back into m/n/p dividers and
 * compute port_clock via the platform-specific dpll params helper.
 * NOTE(review): several else-branches and p2 assignments are missing from
 * this extraction; comments are hedged where the literal values are absent.
 */
7122 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7123 struct intel_crtc_state *pipe_config)
7125 struct drm_device *dev = crtc->base.dev;
7126 struct drm_i915_private *dev_priv = to_i915(dev);
7127 enum pipe pipe = crtc->pipe;
7128 u32 dpll = pipe_config->dpll_hw_state.dpll;
7132 int refclk = i9xx_pll_refclk(dev, pipe_config);
/* FP0 vs FP1 selection follows the rate-select bit in DPLL. */
7134 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7135 fp = pipe_config->dpll_hw_state.fp0;
7137 fp = pipe_config->dpll_hw_state.fp1;
7139 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
/* Pineview packs N/M2 differently from other i9xx parts. */
7140 if (IS_PINEVIEW(dev_priv)) {
7141 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
7142 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
7144 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
7145 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
7148 if (!IS_GEN(dev_priv, 2)) {
7149 if (IS_PINEVIEW(dev_priv))
7150 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
7151 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
7153 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
7154 DPLL_FPA01_P1_POST_DIV_SHIFT);
7156 switch (dpll & DPLL_MODE_MASK) {
7157 case DPLLB_MODE_DAC_SERIAL:
7158 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
7161 case DPLLB_MODE_LVDS:
7162 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
7166 drm_dbg_kms(&dev_priv->drm,
7167 "Unknown DPLL mode %08x in programmed "
7168 "mode\n", (int)(dpll & DPLL_MODE_MASK));
7172 if (IS_PINEVIEW(dev_priv))
7173 port_clock = pnv_calc_dpll_params(refclk, &clock);
7175 port_clock = i9xx_calc_dpll_params(refclk, &clock);
/* Gen2 path: infer LVDS usage from the LVDS register (I830 has none). */
7177 u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
7179 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
7182 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7183 DPLL_FPA01_P1_POST_DIV_SHIFT);
7185 if (lvds & LVDS_CLKB_POWER_UP)
7190 if (dpll & PLL_P1_DIVIDE_BY_TWO)
7193 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
7194 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
7196 if (dpll & PLL_P2_DIVIDE_BY_4)
7202 port_clock = i9xx_calc_dpll_params(refclk, &clock);
7206 * This value includes pixel_multiplier. We will use
7207 * port_clock to compute adjusted_mode.crtc_clock in the
7208 * encoder's get_config() function.
7210 pipe_config->port_clock = port_clock;
/*
 * Convert a link M/N pair plus link frequency into a dotclock, using
 * 64-bit math to avoid overflow in the m * link_freq product.
 */
7213 int intel_dotclock_calculate(int link_freq,
7214 const struct intel_link_m_n *m_n)
7217 * The calculation for the data clock is:
7218 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7219 * But we want to avoid losing precison if possible, so:
7220 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7222 * and the link clock is simpler:
7223 * link_clock = (m * link_clock) / n
7229 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
/*
 * Read out the clock for a PCH-attached pipe: port_clock comes from the
 * DPLL registers, and the dotclock is derived from the FDI M/N values so
 * it is meaningful even with no active ports.
 */
7232 static void ilk_pch_clock_get(struct intel_crtc *crtc,
7233 struct intel_crtc_state *pipe_config)
7235 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7237 /* read out port_clock from the DPLL */
7238 i9xx_crtc_clock_get(crtc, pipe_config);
7241 * In case there is an active pipe without active ports,
7242 * we may need some idea for the dotclock anyway.
7243 * Calculate one based on the FDI configuration.
7245 pipe_config->hw.adjusted_mode.crtc_clock =
7246 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
7247 &pipe_config->fdi_m_n);
7250 /* Returns the currently programmed mode of the given encoder. */
/*
 * Allocate a drm_display_mode describing what hardware is currently
 * scanning out through @encoder, by doing a full pipe readout into a
 * throwaway crtc_state. Caller frees the returned mode; NULL/error
 * returns are on lines not captured here.
 */
7251 struct drm_display_mode *
7252 intel_encoder_current_mode(struct intel_encoder *encoder)
7254 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
7255 struct intel_crtc_state *crtc_state;
7256 struct drm_display_mode *mode;
7257 struct intel_crtc *crtc;
7260 if (!encoder->get_hw_state(encoder, &pipe))
7263 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7265 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7269 crtc_state = intel_crtc_state_alloc(crtc);
7275 if (!intel_crtc_get_pipe_config(crtc_state)) {
7281 intel_encoder_get_config(encoder, crtc_state);
7283 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
7291 * intel_wm_need_update - Check whether watermarks need updating
7292 * @cur: current plane state
7293 * @new: new plane state
7295 * Check current plane state versus the new one to determine whether
7296 * watermarks need to be recalculated.
7298 * Returns true or false.
7300 static bool intel_wm_need_update(const struct intel_plane_state *cur,
7301 struct intel_plane_state *new)
7303 /* Update watermarks on tiling or size changes. */
/* Visibility toggles always require a wm recompute. */
7304 if (new->uapi.visible != cur->uapi.visible)
7307 if (!cur->hw.fb || !new->hw.fb)
/* Any change in tiling, rotation, or src/dst geometry matters. */
7310 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
7311 cur->hw.rotation != new->hw.rotation ||
7312 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
7313 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
7314 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
7315 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
/*
 * True when the plane's source size (16.16 fixed point, hence >> 16)
 * differs from its destination size, i.e. the plane is being scaled.
 */
7321 static bool needs_scaling(const struct intel_plane_state *state)
7323 int src_w = drm_rect_width(&state->uapi.src) >> 16;
7324 int src_h = drm_rect_height(&state->uapi.src) >> 16;
7325 int dst_w = drm_rect_width(&state->uapi.dst);
7326 int dst_h = drm_rect_height(&state->uapi.dst);
7328 return (src_w != dst_w || src_h != dst_h);
/*
 * Per-plane atomic check: compute visibility transitions (turn on/off)
 * and derive crtc-level flags — watermark pre/post updates, cxsr disable,
 * frontbuffer bits, and the ILK/SNB/IVB LP watermark workaround.
 */
7331 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
7332 struct intel_crtc_state *crtc_state,
7333 const struct intel_plane_state *old_plane_state,
7334 struct intel_plane_state *plane_state)
7336 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7337 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7338 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7339 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7340 bool was_crtc_enabled = old_crtc_state->hw.active;
7341 bool is_crtc_enabled = crtc_state->hw.active;
7342 bool turn_off, turn_on, visible, was_visible;
/* Gen9+ non-cursor planes may need a scaler assigned/updated first. */
7345 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
7346 ret = skl_update_scaler_plane(crtc_state, plane_state);
7351 was_visible = old_plane_state->uapi.visible;
7352 visible = plane_state->uapi.visible;
7354 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
7355 was_visible = false;
7358 * Visibility is calculated as if the crtc was on, but
7359 * after scaler setup everything depends on it being off
7360 * when the crtc isn't active.
7362 * FIXME this is wrong for watermarks. Watermarks should also
7363 * be computed as if the pipe would be active. Perhaps move
7364 * per-plane wm computation to the .check_plane() hook, and
7365 * only combine the results from all planes in the current place?
7367 if (!is_crtc_enabled) {
7368 intel_plane_set_invisible(crtc_state, plane_state);
7372 if (!was_visible && !visible)
7375 turn_off = was_visible && (!visible || mode_changed);
7376 turn_on = visible && (!was_visible || mode_changed);
7378 drm_dbg_atomic(&dev_priv->drm,
7379 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
7380 crtc->base.base.id, crtc->base.name,
7381 plane->base.base.id, plane->base.name,
7382 was_visible, visible,
7383 turn_off, turn_on, mode_changed);
/* Pre-gen5 (minus g4x) recompute watermarks before enabling a plane. */
7386 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
7387 crtc_state->update_wm_pre = true;
7389 /* must disable cxsr around plane enable/disable */
7390 if (plane->id != PLANE_CURSOR)
7391 crtc_state->disable_cxsr = true;
7392 } else if (turn_off) {
7393 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
7394 crtc_state->update_wm_post = true;
7396 /* must disable cxsr around plane enable/disable */
7397 if (plane->id != PLANE_CURSOR)
7398 crtc_state->disable_cxsr = true;
7399 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
7400 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
7401 /* FIXME bollocks */
7402 crtc_state->update_wm_pre = true;
7403 crtc_state->update_wm_post = true;
7407 if (visible || was_visible)
7408 crtc_state->fb_bits |= plane->frontbuffer_bit;
7411 * ILK/SNB DVSACNTR/Sprite Enable
7412 * IVB SPR_CTL/Sprite Enable
7413 * "When in Self Refresh Big FIFO mode, a write to enable the
7414 * plane will be internally buffered and delayed while Big FIFO
7417 * Which means that enabling the sprite can take an extra frame
7418 * when we start in big FIFO mode (LP1+). Thus we need to drop
7419 * down to LP0 and wait for vblank in order to make sure the
7420 * sprite gets enabled on the next vblank after the register write.
7421 * Doing otherwise would risk enabling the sprite one frame after
7422 * we've already signalled flip completion. We can resume LP1+
7423 * once the sprite has been enabled.
7426 * WaCxSRDisabledForSpriteScaling:ivb
7427 * IVB SPR_SCALE/Scaling Enable
7428 * "Low Power watermarks must be disabled for at least one
7429 * frame before enabling sprite scaling, and kept disabled
7430 * until sprite scaling is disabled."
7432 * ILK/SNB DVSASCALE/Scaling Enable
7433 * "When in Self Refresh Big FIFO mode, scaling enable will be
7434 * masked off while Big FIFO mode is exiting."
7436 * Despite the w/a only being listed for IVB we assume that
7437 * the ILK/SNB note has similar ramifications, hence we apply
7438 * the w/a on all three platforms.
7440 * With experimental results seems this is needed also for primary
7441 * plane, not only sprite plane.
7443 if (plane->id != PLANE_CURSOR &&
7444 (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
7445 IS_IVYBRIDGE(dev_priv)) &&
7446 (turn_on || (!needs_scaling(old_plane_state) &&
7447 needs_scaling(plane_state))))
7448 crtc_state->disable_lp_wm = true;
/* Can encoders @a and @b share a crtc? Same encoder always qualifies. */
7453 static bool encoders_cloneable(const struct intel_encoder *a,
7454 const struct intel_encoder *b)
7456 /* masks could be asymmetric, so check both ways */
7457 return a == b || (a->cloneable & (1 << b->type) &&
7458 b->cloneable & (1 << a->type));
/*
 * Verify @encoder can be cloned with every other encoder already routed
 * to @crtc in this atomic state.
 */
7461 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
7462 struct intel_crtc *crtc,
7463 struct intel_encoder *encoder)
7465 struct intel_encoder *source_encoder;
7466 struct drm_connector *connector;
7467 struct drm_connector_state *connector_state;
7470 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7471 if (connector_state->crtc != &crtc->base)
7475 to_intel_encoder(connector_state->best_encoder);
7476 if (!encoders_cloneable(encoder, source_encoder))
/*
 * Pull the Y-plane partner of every planar (NV12) plane already in the
 * state into the state too, so both halves are committed together.
 */
7483 static int icl_add_linked_planes(struct intel_atomic_state *state)
7485 struct intel_plane *plane, *linked;
7486 struct intel_plane_state *plane_state, *linked_plane_state;
7489 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7490 linked = plane_state->planar_linked_plane;
7495 linked_plane_state = intel_atomic_get_plane_state(state, linked);
7496 if (IS_ERR(linked_plane_state))
7497 return PTR_ERR(linked_plane_state);
/* Sanity: links must be mutual and master/slave roles must differ. */
7499 drm_WARN_ON(state->base.dev,
7500 linked_plane_state->planar_linked_plane != plane);
7501 drm_WARN_ON(state->base.dev,
7502 linked_plane_state->planar_slave == plane_state->planar_slave);
/*
 * Gen11+ planar YUV handling: tear down stale UV<->Y plane links, then
 * for every NV12 plane pick a free Y-capable plane as its slave and copy
 * the relevant hardware state across. Fails if no Y plane is available.
 */
7508 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
7510 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7511 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7512 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
7513 struct intel_plane *plane, *linked;
7514 struct intel_plane_state *plane_state;
7517 if (INTEL_GEN(dev_priv) < 11)
7521 * Destroy all old plane links and make the slave plane invisible
7522 * in the crtc_state->active_planes mask.
7524 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7525 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
7528 plane_state->planar_linked_plane = NULL;
7529 if (plane_state->planar_slave && !plane_state->uapi.visible) {
7530 crtc_state->enabled_planes &= ~BIT(plane->id);
7531 crtc_state->active_planes &= ~BIT(plane->id);
7532 crtc_state->update_planes |= BIT(plane->id);
7535 plane_state->planar_slave = false;
/* Nothing to link if no plane on this crtc uses NV12. */
7538 if (!crtc_state->nv12_planes)
7541 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7542 struct intel_plane_state *linked_state = NULL;
7544 if (plane->pipe != crtc->pipe ||
7545 !(crtc_state->nv12_planes & BIT(plane->id)))
/* Find an idle plane that can serve as the Y (luma) plane. */
7548 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
7549 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
7552 if (crtc_state->active_planes & BIT(linked->id))
7555 linked_state = intel_atomic_get_plane_state(state, linked);
7556 if (IS_ERR(linked_state))
7557 return PTR_ERR(linked_state);
7562 if (!linked_state) {
7563 drm_dbg_kms(&dev_priv->drm,
7564 "Need %d free Y planes for planar YUV\n",
7565 hweight8(crtc_state->nv12_planes));
7570 plane_state->planar_linked_plane = linked;
7572 linked_state->planar_slave = true;
7573 linked_state->planar_linked_plane = plane;
7574 crtc_state->enabled_planes |= BIT(linked->id);
7575 crtc_state->active_planes |= BIT(linked->id);
7576 crtc_state->update_planes |= BIT(linked->id);
7577 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
7578 linked->base.name, plane->base.name);
7580 /* Copy parameters to slave plane */
7581 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
7582 linked_state->color_ctl = plane_state->color_ctl;
7583 linked_state->view = plane_state->view;
7584 memcpy(linked_state->color_plane, plane_state->color_plane,
7585 sizeof(linked_state->color_plane));
7587 intel_plane_copy_hw_state(linked_state, plane_state);
7588 linked_state->uapi.src = plane_state->uapi.src;
7589 linked_state->uapi.dst = plane_state->uapi.dst;
/* HDR planes must name their chroma-upsampler partner in CUS_CTL. */
7591 if (icl_is_hdr_plane(dev_priv, plane->id)) {
7592 if (linked->id == PLANE_SPRITE5)
7593 plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
7594 else if (linked->id == PLANE_SPRITE4)
7595 plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
7596 else if (linked->id == PLANE_SPRITE3)
7597 plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
7598 else if (linked->id == PLANE_SPRITE2)
7599 plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
7601 MISSING_CASE(linked->id);
/*
 * True when the set of C8-format planes toggles between empty and
 * non-empty across this commit (the double-! compares emptiness only).
 */
7608 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7610 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7611 struct intel_atomic_state *state =
7612 to_intel_atomic_state(new_crtc_state->uapi.state);
7613 const struct intel_crtc_state *old_crtc_state =
7614 intel_atomic_get_old_crtc_state(state, crtc);
7616 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
/*
 * HSW/BDW linetime watermark: line time in 1/8 us units derived from
 * htotal and the pipe clock, clamped to the 9-bit register field (0x1ff).
 */
7619 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7621 const struct drm_display_mode *pipe_mode =
7622 &crtc_state->hw.pipe_mode;
7625 if (!crtc_state->hw.enable)
7628 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7629 pipe_mode->crtc_clock);
7631 return min(linetime_wm, 0x1ff);
/*
 * IPS linetime watermark: same formula as hsw_linetime_wm() but computed
 * against the logical cdclk instead of the pipe clock.
 */
7634 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7635 const struct intel_cdclk_state *cdclk_state)
7637 const struct drm_display_mode *pipe_mode =
7638 &crtc_state->hw.pipe_mode;
7641 if (!crtc_state->hw.enable)
7644 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7645 cdclk_state->logical.cdclk)
7647 return min(linetime_wm, 0x1ff);
/*
 * Gen9+ linetime watermark based on the pixel rate, rounded up, with the
 * BXT/GLK IPC adjustment (WA #1135; adjustment line not captured here).
 */
7650 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7652 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7653 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7654 const struct drm_display_mode *pipe_mode =
7655 &crtc_state->hw.pipe_mode;
7658 if (!crtc_state->hw.enable)
7661 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7662 crtc_state->pixel_rate);
7664 /* Display WA #1135: BXT:ALL GLK:ALL */
7665 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
7668 return min(linetime_wm, 0x1ff);
/*
 * Fill crtc_state->linetime (platform-appropriate formula) and, for
 * IPS-capable crtcs, ips_linetime from the shared cdclk state.
 */
7671 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7672 struct intel_crtc *crtc)
7674 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7675 struct intel_crtc_state *crtc_state =
7676 intel_atomic_get_new_crtc_state(state, crtc);
7677 const struct intel_cdclk_state *cdclk_state;
7679 if (INTEL_GEN(dev_priv) >= 9)
7680 crtc_state->linetime = skl_linetime_wm(crtc_state);
7682 crtc_state->linetime = hsw_linetime_wm(crtc_state);
7684 if (!hsw_crtc_supports_ips(crtc))
/* May return -EDEADLK etc. if the cdclk state cannot be locked. */
7687 cdclk_state = intel_atomic_get_cdclk_state(state);
7688 if (IS_ERR(cdclk_state))
7689 return PTR_ERR(cdclk_state);
7691 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
/*
 * Per-crtc atomic check: clock computation, color management, watermarks
 * (pipe + intermediate), gen9 scalers, IPS, linetime and PSR2 selective
 * fetch. Each step returns early on error (returns on uncaptured lines).
 */
7697 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
7698 struct intel_crtc *crtc)
7700 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7701 struct intel_crtc_state *crtc_state =
7702 intel_atomic_get_new_crtc_state(state, crtc);
7703 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7706 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
7707 mode_changed && !crtc_state->hw.active)
7708 crtc_state->update_wm_post = true;
/* Compute a new clock only on full modesets; slaves share the master's. */
7710 if (mode_changed && crtc_state->hw.enable &&
7711 dev_priv->display.crtc_compute_clock &&
7712 !crtc_state->bigjoiner_slave &&
7713 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
7714 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
7720 * May need to update pipe gamma enable bits
7721 * when C8 planes are getting enabled/disabled.
7723 if (c8_planes_changed(crtc_state))
7724 crtc_state->uapi.color_mgmt_changed = true;
7726 if (mode_changed || crtc_state->update_pipe ||
7727 crtc_state->uapi.color_mgmt_changed) {
7728 ret = intel_color_check(crtc_state);
7733 if (dev_priv->display.compute_pipe_wm) {
7734 ret = dev_priv->display.compute_pipe_wm(crtc_state);
7736 drm_dbg_kms(&dev_priv->drm,
7737 "Target pipe watermarks are invalid\n");
7742 if (dev_priv->display.compute_intermediate_wm) {
7743 if (drm_WARN_ON(&dev_priv->drm,
7744 !dev_priv->display.compute_pipe_wm))
7748 * Calculate 'intermediate' watermarks that satisfy both the
7749 * old state and the new state. We can program these
7752 ret = dev_priv->display.compute_intermediate_wm(crtc_state);
7754 drm_dbg_kms(&dev_priv->drm,
7755 "No valid intermediate pipe watermarks are possible\n");
7760 if (INTEL_GEN(dev_priv) >= 9) {
7761 if (mode_changed || crtc_state->update_pipe) {
7762 ret = skl_update_scaler_crtc(crtc_state);
7767 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
7772 if (HAS_IPS(dev_priv)) {
7773 ret = hsw_compute_ips_config(crtc_state);
7778 if (INTEL_GEN(dev_priv) >= 9 ||
7779 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
7780 ret = hsw_compute_linetime_wm(state, crtc);
/* PSR2 selective fetch only applies to fast (non-modeset) updates. */
7786 if (!mode_changed) {
7787 ret = intel_psr2_sel_fetch_update(state, crtc);
/*
 * Sync connector atomic state with actual hardware routing after readout:
 * set/clear best_encoder, crtc, max_bpc and adjust connector references.
 * NOTE(review): some branch lines are missing from this extraction, so the
 * exact get/put pairing should be confirmed against the full source.
 */
7795 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7797 struct intel_connector *connector;
7798 struct drm_connector_list_iter conn_iter;
7800 drm_connector_list_iter_begin(dev, &conn_iter);
7801 for_each_intel_connector_iter(connector, &conn_iter) {
7802 struct drm_connector_state *conn_state = connector->base.state;
7803 struct intel_encoder *encoder =
7804 to_intel_encoder(connector->base.encoder);
7806 if (conn_state->crtc)
7807 drm_connector_put(&connector->base);
7810 struct intel_crtc *crtc =
7811 to_intel_crtc(encoder->base.crtc);
7812 const struct intel_crtc_state *crtc_state =
7813 to_intel_crtc_state(crtc->base.state);
7815 conn_state->best_encoder = &encoder->base;
7816 conn_state->crtc = &crtc->base;
/* Derive max_bpc from pipe_bpp, defaulting to 24 bpp (8 per channel). */
7817 conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7819 drm_connector_get(&connector->base);
7821 conn_state->best_encoder = NULL;
7822 conn_state->crtc = NULL;
7825 drm_connector_list_iter_end(&conn_iter);
/*
 * Clamp pipe_bpp to what this connector's sink allows, based on
 * conn_state->max_bpc (the switch arms are on uncaptured lines).
 */
7829 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7830 struct intel_crtc_state *pipe_config)
7832 struct drm_connector *connector = conn_state->connector;
7833 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7834 const struct drm_display_info *info = &connector->display_info;
7837 switch (conn_state->max_bpc) {
7851 MISSING_CASE(conn_state->max_bpc);
/* Only lower pipe_bpp; never raise it above another sink's limit. */
7855 if (bpp < pipe_config->pipe_bpp) {
7856 drm_dbg_kms(&i915->drm,
7857 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7858 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7859 connector->base.id, connector->name,
7861 3 * conn_state->max_requested_bpc,
7862 pipe_config->pipe_bpp);
7864 pipe_config->pipe_bpp = bpp;
/*
 * Pick the platform's maximum pipe bpp as a starting point (literal
 * values are on uncaptured lines), then clamp it per connected sink.
 */
7871 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7872 struct intel_crtc_state *pipe_config)
7874 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7875 struct drm_atomic_state *state = pipe_config->uapi.state;
7876 struct drm_connector *connector;
7877 struct drm_connector_state *connector_state;
7880 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7881 IS_CHERRYVIEW(dev_priv)))
7883 else if (INTEL_GEN(dev_priv) >= 5)
7888 pipe_config->pipe_bpp = bpp;
7890 /* Clamp display bpp to connector max bpp */
7891 for_each_new_connector_in_state(state, connector, connector_state, i) {
7894 if (connector_state->crtc != &crtc->base)
7897 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
/* Debug helper: dump the crtc_* timing fields of @mode to the KMS log. */
7905 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7906 const struct drm_display_mode *mode)
7908 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7909 "type: 0x%x flags: 0x%x\n",
7911 mode->crtc_hdisplay, mode->crtc_hsync_start,
7912 mode->crtc_hsync_end, mode->crtc_htotal,
7913 mode->crtc_vdisplay, mode->crtc_vsync_start,
7914 mode->crtc_vsync_end, mode->crtc_vtotal,
7915 mode->type, mode->flags);
/* Debug helper: dump one link M/N configuration identified by @id. */
7919 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7920 const char *id, unsigned int lane_count,
7921 const struct intel_link_m_n *m_n)
7923 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7925 drm_dbg_kms(&i915->drm,
7926 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7928 m_n->gmch_m, m_n->gmch_n,
7929 m_n->link_m, m_n->link_n, m_n->tu);
7933 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7934 const union hdmi_infoframe *frame)
7936 if (!drm_debug_enabled(DRM_UT_KMS))
7939 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7943 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7944 const struct drm_dp_vsc_sdp *vsc)
7946 if (!drm_debug_enabled(DRM_UT_KMS))
7949 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7952 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7954 static const char * const output_type_str[] = {
7955 OUTPUT_TYPE(UNUSED),
7956 OUTPUT_TYPE(ANALOG),
7966 OUTPUT_TYPE(DP_MST),
7971 static void snprintf_output_types(char *buf, size_t len,
7972 unsigned int output_types)
7979 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7982 if ((output_types & BIT(i)) == 0)
7985 r = snprintf(str, len, "%s%s",
7986 str != buf ? "," : "", output_type_str[i]);
7992 output_types &= ~BIT(i);
7995 WARN_ON_ONCE(output_types != 0);
7998 static const char * const output_format_str[] = {
7999 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
8000 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
8001 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
8004 static const char *output_formats(enum intel_output_format format)
8006 if (format >= ARRAY_SIZE(output_format_str))
8008 return output_format_str[format];
8011 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
8013 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
8014 struct drm_i915_private *i915 = to_i915(plane->base.dev);
8015 const struct drm_framebuffer *fb = plane_state->hw.fb;
8016 struct drm_format_name_buf format_name;
8019 drm_dbg_kms(&i915->drm,
8020 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
8021 plane->base.base.id, plane->base.name,
8022 yesno(plane_state->uapi.visible));
8026 drm_dbg_kms(&i915->drm,
8027 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
8028 plane->base.base.id, plane->base.name,
8029 fb->base.id, fb->width, fb->height,
8030 drm_get_format_name(fb->format->format, &format_name),
8031 fb->modifier, yesno(plane_state->uapi.visible));
8032 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
8033 plane_state->hw.rotation, plane_state->scaler_id);
8034 if (plane_state->uapi.visible)
8035 drm_dbg_kms(&i915->drm,
8036 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
8037 DRM_RECT_FP_ARG(&plane_state->uapi.src),
8038 DRM_RECT_ARG(&plane_state->uapi.dst));
8041 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
8042 struct intel_atomic_state *state,
8043 const char *context)
8045 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8046 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8047 const struct intel_plane_state *plane_state;
8048 struct intel_plane *plane;
8052 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
8053 crtc->base.base.id, crtc->base.name,
8054 yesno(pipe_config->hw.enable), context);
8056 if (!pipe_config->hw.enable)
8059 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
8060 drm_dbg_kms(&dev_priv->drm,
8061 "active: %s, output_types: %s (0x%x), output format: %s\n",
8062 yesno(pipe_config->hw.active),
8063 buf, pipe_config->output_types,
8064 output_formats(pipe_config->output_format));
8066 drm_dbg_kms(&dev_priv->drm,
8067 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
8068 transcoder_name(pipe_config->cpu_transcoder),
8069 pipe_config->pipe_bpp, pipe_config->dither);
8071 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
8072 transcoder_name(pipe_config->mst_master_transcoder));
8074 drm_dbg_kms(&dev_priv->drm,
8075 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
8076 transcoder_name(pipe_config->master_transcoder),
8077 pipe_config->sync_mode_slaves_mask);
8079 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
8080 pipe_config->bigjoiner_slave ? "slave" :
8081 pipe_config->bigjoiner ? "master" : "no");
8083 drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
8084 enableddisabled(pipe_config->splitter.enable),
8085 pipe_config->splitter.link_count,
8086 pipe_config->splitter.pixel_overlap);
8088 if (pipe_config->has_pch_encoder)
8089 intel_dump_m_n_config(pipe_config, "fdi",
8090 pipe_config->fdi_lanes,
8091 &pipe_config->fdi_m_n);
8093 if (intel_crtc_has_dp_encoder(pipe_config)) {
8094 intel_dump_m_n_config(pipe_config, "dp m_n",
8095 pipe_config->lane_count, &pipe_config->dp_m_n);
8096 if (pipe_config->has_drrs)
8097 intel_dump_m_n_config(pipe_config, "dp m2_n2",
8098 pipe_config->lane_count,
8099 &pipe_config->dp_m2_n2);
8102 drm_dbg_kms(&dev_priv->drm,
8103 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
8104 pipe_config->has_audio, pipe_config->has_infoframe,
8105 pipe_config->infoframes.enable);
8107 if (pipe_config->infoframes.enable &
8108 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
8109 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
8110 pipe_config->infoframes.gcp);
8111 if (pipe_config->infoframes.enable &
8112 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
8113 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
8114 if (pipe_config->infoframes.enable &
8115 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
8116 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
8117 if (pipe_config->infoframes.enable &
8118 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
8119 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
8120 if (pipe_config->infoframes.enable &
8121 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
8122 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
8123 if (pipe_config->infoframes.enable &
8124 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
8125 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
8126 if (pipe_config->infoframes.enable &
8127 intel_hdmi_infoframe_enable(DP_SDP_VSC))
8128 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
8130 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
8131 yesno(pipe_config->vrr.enable),
8132 pipe_config->vrr.vmin, pipe_config->vrr.vmax,
8133 pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
8134 intel_vrr_vmin_vblank_start(pipe_config),
8135 intel_vrr_vmax_vblank_start(pipe_config));
8137 drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
8138 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
8139 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
8140 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
8141 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
8142 drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
8143 drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
8144 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
8145 drm_dbg_kms(&dev_priv->drm,
8146 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
8147 pipe_config->port_clock,
8148 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
8149 pipe_config->pixel_rate);
8151 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
8152 pipe_config->linetime, pipe_config->ips_linetime);
8154 if (INTEL_GEN(dev_priv) >= 9)
8155 drm_dbg_kms(&dev_priv->drm,
8156 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
8158 pipe_config->scaler_state.scaler_users,
8159 pipe_config->scaler_state.scaler_id);
8161 if (HAS_GMCH(dev_priv))
8162 drm_dbg_kms(&dev_priv->drm,
8163 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8164 pipe_config->gmch_pfit.control,
8165 pipe_config->gmch_pfit.pgm_ratios,
8166 pipe_config->gmch_pfit.lvds_border_bits);
8168 drm_dbg_kms(&dev_priv->drm,
8169 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
8170 DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
8171 enableddisabled(pipe_config->pch_pfit.enabled),
8172 yesno(pipe_config->pch_pfit.force_thru));
8174 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
8175 pipe_config->ips_enabled, pipe_config->double_wide);
8177 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
8179 if (IS_CHERRYVIEW(dev_priv))
8180 drm_dbg_kms(&dev_priv->drm,
8181 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
8182 pipe_config->cgm_mode, pipe_config->gamma_mode,
8183 pipe_config->gamma_enable, pipe_config->csc_enable);
8185 drm_dbg_kms(&dev_priv->drm,
8186 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
8187 pipe_config->csc_mode, pipe_config->gamma_mode,
8188 pipe_config->gamma_enable, pipe_config->csc_enable);
8190 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
8191 pipe_config->hw.degamma_lut ?
8192 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
8193 pipe_config->hw.gamma_lut ?
8194 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
8200 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8201 if (plane->pipe == crtc->pipe)
8202 intel_dump_plane_state(plane_state);
8206 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
8208 struct drm_device *dev = state->base.dev;
8209 struct drm_connector *connector;
8210 struct drm_connector_list_iter conn_iter;
8211 unsigned int used_ports = 0;
8212 unsigned int used_mst_ports = 0;
8216 * We're going to peek into connector->state,
8217 * hence connection_mutex must be held.
8219 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
8222 * Walk the connector list instead of the encoder
8223 * list to detect the problem on ddi platforms
8224 * where there's just one encoder per digital port.
8226 drm_connector_list_iter_begin(dev, &conn_iter);
8227 drm_for_each_connector_iter(connector, &conn_iter) {
8228 struct drm_connector_state *connector_state;
8229 struct intel_encoder *encoder;
8232 drm_atomic_get_new_connector_state(&state->base,
8234 if (!connector_state)
8235 connector_state = connector->state;
8237 if (!connector_state->best_encoder)
8240 encoder = to_intel_encoder(connector_state->best_encoder);
8242 drm_WARN_ON(dev, !connector_state->crtc);
8244 switch (encoder->type) {
8245 case INTEL_OUTPUT_DDI:
8246 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
8249 case INTEL_OUTPUT_DP:
8250 case INTEL_OUTPUT_HDMI:
8251 case INTEL_OUTPUT_EDP:
8252 /* the same port mustn't appear more than once */
8253 if (used_ports & BIT(encoder->port))
8256 used_ports |= BIT(encoder->port);
8258 case INTEL_OUTPUT_DP_MST:
8266 drm_connector_list_iter_end(&conn_iter);
8268 /* can't mix MST and SST/HDMI on the same port */
8269 if (used_ports & used_mst_ports)
8276 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
8277 struct intel_crtc_state *crtc_state)
8279 const struct intel_crtc_state *from_crtc_state = crtc_state;
8281 if (crtc_state->bigjoiner_slave) {
8282 from_crtc_state = intel_atomic_get_new_crtc_state(state,
8283 crtc_state->bigjoiner_linked_crtc);
8285 /* No need to copy state if the master state is unchanged */
8286 if (!from_crtc_state)
8290 intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
8294 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
8295 struct intel_crtc_state *crtc_state)
8297 crtc_state->hw.enable = crtc_state->uapi.enable;
8298 crtc_state->hw.active = crtc_state->uapi.active;
8299 crtc_state->hw.mode = crtc_state->uapi.mode;
8300 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
8301 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
8303 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
8306 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
8308 if (crtc_state->bigjoiner_slave)
8311 crtc_state->uapi.enable = crtc_state->hw.enable;
8312 crtc_state->uapi.active = crtc_state->hw.active;
8313 drm_WARN_ON(crtc_state->uapi.crtc->dev,
8314 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
8316 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
8317 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
8319 /* copy color blobs to uapi */
8320 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
8321 crtc_state->hw.degamma_lut);
8322 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
8323 crtc_state->hw.gamma_lut);
8324 drm_property_replace_blob(&crtc_state->uapi.ctm,
8325 crtc_state->hw.ctm);
8329 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
8330 const struct intel_crtc_state *from_crtc_state)
8332 struct intel_crtc_state *saved_state;
8333 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8335 saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
8339 saved_state->uapi = crtc_state->uapi;
8340 saved_state->scaler_state = crtc_state->scaler_state;
8341 saved_state->shared_dpll = crtc_state->shared_dpll;
8342 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
8343 saved_state->crc_enabled = crtc_state->crc_enabled;
8345 intel_crtc_free_hw_state(crtc_state);
8346 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
8349 /* Re-init hw state */
8350 memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
8351 crtc_state->hw.enable = from_crtc_state->hw.enable;
8352 crtc_state->hw.active = from_crtc_state->hw.active;
8353 crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
8354 crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
8357 crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
8358 crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
8359 crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
8360 crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
8361 crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
8362 crtc_state->bigjoiner_slave = true;
8363 crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
8364 crtc_state->has_audio = false;
8370 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
8371 struct intel_crtc_state *crtc_state)
8373 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8374 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8375 struct intel_crtc_state *saved_state;
8377 saved_state = intel_crtc_state_alloc(crtc);
8381 /* free the old crtc_state->hw members */
8382 intel_crtc_free_hw_state(crtc_state);
8384 /* FIXME: before the switch to atomic started, a new pipe_config was
8385 * kzalloc'd. Code that depends on any field being zero should be
8386 * fixed, so that the crtc_state can be safely duplicated. For now,
8387 * only fields that are know to not cause problems are preserved. */
8389 saved_state->uapi = crtc_state->uapi;
8390 saved_state->scaler_state = crtc_state->scaler_state;
8391 saved_state->shared_dpll = crtc_state->shared_dpll;
8392 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
8393 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
8394 sizeof(saved_state->icl_port_dplls));
8395 saved_state->crc_enabled = crtc_state->crc_enabled;
8396 if (IS_G4X(dev_priv) ||
8397 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8398 saved_state->wm = crtc_state->wm;
8400 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
8403 intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
8409 intel_modeset_pipe_config(struct intel_atomic_state *state,
8410 struct intel_crtc_state *pipe_config)
8412 struct drm_crtc *crtc = pipe_config->uapi.crtc;
8413 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
8414 struct drm_connector *connector;
8415 struct drm_connector_state *connector_state;
8416 int base_bpp, ret, i;
8419 pipe_config->cpu_transcoder =
8420 (enum transcoder) to_intel_crtc(crtc)->pipe;
8423 * Sanitize sync polarity flags based on requested ones. If neither
8424 * positive or negative polarity is requested, treat this as meaning
8425 * negative polarity.
8427 if (!(pipe_config->hw.adjusted_mode.flags &
8428 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8429 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8431 if (!(pipe_config->hw.adjusted_mode.flags &
8432 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8433 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8435 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8440 base_bpp = pipe_config->pipe_bpp;
8443 * Determine the real pipe dimensions. Note that stereo modes can
8444 * increase the actual pipe size due to the frame doubling and
8445 * insertion of additional space for blanks between the frame. This
8446 * is stored in the crtc timings. We use the requested mode to do this
8447 * computation to clearly distinguish it from the adjusted mode, which
8448 * can be changed by the connectors in the below retry loop.
8450 drm_mode_get_hv_timing(&pipe_config->hw.mode,
8451 &pipe_config->pipe_src_w,
8452 &pipe_config->pipe_src_h);
8454 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8455 struct intel_encoder *encoder =
8456 to_intel_encoder(connector_state->best_encoder);
8458 if (connector_state->crtc != crtc)
8461 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
8462 drm_dbg_kms(&i915->drm,
8463 "rejecting invalid cloning configuration\n");
8468 * Determine output_types before calling the .compute_config()
8469 * hooks so that the hooks can use this information safely.
8471 if (encoder->compute_output_type)
8472 pipe_config->output_types |=
8473 BIT(encoder->compute_output_type(encoder, pipe_config,
8476 pipe_config->output_types |= BIT(encoder->type);
8480 /* Ensure the port clock defaults are reset when retrying. */
8481 pipe_config->port_clock = 0;
8482 pipe_config->pixel_multiplier = 1;
8484 /* Fill in default crtc timings, allow encoders to overwrite them. */
8485 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
8486 CRTC_STEREO_DOUBLE);
8488 /* Pass our mode to the connectors and the CRTC to give them a chance to
8489 * adjust it according to limitations or connector properties, and also
8490 * a chance to reject the mode entirely.
8492 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8493 struct intel_encoder *encoder =
8494 to_intel_encoder(connector_state->best_encoder);
8496 if (connector_state->crtc != crtc)
8499 ret = encoder->compute_config(encoder, pipe_config,
8502 if (ret != -EDEADLK)
8503 drm_dbg_kms(&i915->drm,
8504 "Encoder config failure: %d\n",
8510 /* Set default port clock if not overwritten by the encoder. Needs to be
8511 * done afterwards in case the encoder adjusts the mode. */
8512 if (!pipe_config->port_clock)
8513 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
8514 * pipe_config->pixel_multiplier;
8516 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8517 if (ret == -EDEADLK)
8520 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
8524 if (ret == I915_DISPLAY_CONFIG_RETRY) {
8525 if (drm_WARN(&i915->drm, !retry,
8526 "loop in pipe configuration computation\n"))
8529 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
8534 /* Dithering seems to not pass-through bits correctly when it should, so
8535 * only enable it on 6bpc panels and when its not a compliance
8536 * test requesting 6bpc video pattern.
8538 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
8539 !pipe_config->dither_force_disable;
8540 drm_dbg_kms(&i915->drm,
8541 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
8542 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
8548 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
8550 struct intel_atomic_state *state =
8551 to_intel_atomic_state(crtc_state->uapi.state);
8552 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8553 struct drm_connector_state *conn_state;
8554 struct drm_connector *connector;
8557 for_each_new_connector_in_state(&state->base, connector,
8559 struct intel_encoder *encoder =
8560 to_intel_encoder(conn_state->best_encoder);
8563 if (conn_state->crtc != &crtc->base ||
8564 !encoder->compute_config_late)
8567 ret = encoder->compute_config_late(encoder, crtc_state,
/*
 * Compare two clocks, allowing roughly 10% slack: returns true when the
 * clocks are equal or within tolerance; a zero clock only matches
 * another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	/* (diff + avg) / avg < 1.05  <=>  diff < ~10% of the average */
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}
8595 intel_compare_m_n(unsigned int m, unsigned int n,
8596 unsigned int m2, unsigned int n2,
8599 if (m == m2 && n == n2)
8602 if (exact || !m || !n || !m2 || !n2)
8605 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8612 } else if (n < n2) {
8622 return intel_fuzzy_clock_check(m, m2);
8626 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8627 const struct intel_link_m_n *m2_n2,
8630 return m_n->tu == m2_n2->tu &&
8631 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8632 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8633 intel_compare_m_n(m_n->link_m, m_n->link_n,
8634 m2_n2->link_m, m2_n2->link_n, exact);
8638 intel_compare_infoframe(const union hdmi_infoframe *a,
8639 const union hdmi_infoframe *b)
8641 return memcmp(a, b, sizeof(*a)) == 0;
8645 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8646 const struct drm_dp_vsc_sdp *b)
8648 return memcmp(a, b, sizeof(*a)) == 0;
8652 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8653 bool fastset, const char *name,
8654 const union hdmi_infoframe *a,
8655 const union hdmi_infoframe *b)
8658 if (!drm_debug_enabled(DRM_UT_KMS))
8661 drm_dbg_kms(&dev_priv->drm,
8662 "fastset mismatch in %s infoframe\n", name);
8663 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8664 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8665 drm_dbg_kms(&dev_priv->drm, "found:\n");
8666 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8668 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8669 drm_err(&dev_priv->drm, "expected:\n");
8670 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8671 drm_err(&dev_priv->drm, "found:\n");
8672 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8677 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8678 bool fastset, const char *name,
8679 const struct drm_dp_vsc_sdp *a,
8680 const struct drm_dp_vsc_sdp *b)
8683 if (!drm_debug_enabled(DRM_UT_KMS))
8686 drm_dbg_kms(&dev_priv->drm,
8687 "fastset mismatch in %s dp sdp\n", name);
8688 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8689 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8690 drm_dbg_kms(&dev_priv->drm, "found:\n");
8691 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8693 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8694 drm_err(&dev_priv->drm, "expected:\n");
8695 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8696 drm_err(&dev_priv->drm, "found:\n");
8697 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8701 static void __printf(4, 5)
8702 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8703 const char *name, const char *format, ...)
8705 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8706 struct va_format vaf;
8709 va_start(args, format);
8714 drm_dbg_kms(&i915->drm,
8715 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8716 crtc->base.base.id, crtc->base.name, name, &vaf);
8718 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8719 crtc->base.base.id, crtc->base.name, name, &vaf);
8724 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8726 if (dev_priv->params.fastboot != -1)
8727 return dev_priv->params.fastboot;
8729 /* Enable fastboot by default on Skylake and newer */
8730 if (INTEL_GEN(dev_priv) >= 9)
8733 /* Enable fastboot by default on VLV and CHV */
8734 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8737 /* Disabled by default on all others */
8742 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8743 const struct intel_crtc_state *pipe_config,
8746 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8747 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8750 bool fixup_inherited = fastset &&
8751 current_config->inherited && !pipe_config->inherited;
8753 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8754 drm_dbg_kms(&dev_priv->drm,
8755 "initial modeset and fastboot not set\n");
8759 #define PIPE_CONF_CHECK_X(name) do { \
8760 if (current_config->name != pipe_config->name) { \
8761 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8762 "(expected 0x%08x, found 0x%08x)", \
8763 current_config->name, \
8764 pipe_config->name); \
8769 #define PIPE_CONF_CHECK_I(name) do { \
8770 if (current_config->name != pipe_config->name) { \
8771 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8772 "(expected %i, found %i)", \
8773 current_config->name, \
8774 pipe_config->name); \
8779 #define PIPE_CONF_CHECK_BOOL(name) do { \
8780 if (current_config->name != pipe_config->name) { \
8781 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8782 "(expected %s, found %s)", \
8783 yesno(current_config->name), \
8784 yesno(pipe_config->name)); \
8790 * Checks state where we only read out the enabling, but not the entire
8791 * state itself (like full infoframes or ELD for audio). These states
8792 * require a full modeset on bootup to fix up.
8794 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8795 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8796 PIPE_CONF_CHECK_BOOL(name); \
8798 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8799 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8800 yesno(current_config->name), \
8801 yesno(pipe_config->name)); \
8806 #define PIPE_CONF_CHECK_P(name) do { \
8807 if (current_config->name != pipe_config->name) { \
8808 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8809 "(expected %p, found %p)", \
8810 current_config->name, \
8811 pipe_config->name); \
8816 #define PIPE_CONF_CHECK_M_N(name) do { \
8817 if (!intel_compare_link_m_n(¤t_config->name, \
8818 &pipe_config->name,\
8820 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8821 "(expected tu %i gmch %i/%i link %i/%i, " \
8822 "found tu %i, gmch %i/%i link %i/%i)", \
8823 current_config->name.tu, \
8824 current_config->name.gmch_m, \
8825 current_config->name.gmch_n, \
8826 current_config->name.link_m, \
8827 current_config->name.link_n, \
8828 pipe_config->name.tu, \
8829 pipe_config->name.gmch_m, \
8830 pipe_config->name.gmch_n, \
8831 pipe_config->name.link_m, \
8832 pipe_config->name.link_n); \
8837 /* This is required for BDW+ where there is only one set of registers for
8838 * switching between high and low RR.
8839 * This macro can be used whenever a comparison has to be made between one
8840 * hw state and multiple sw state variables.
8842 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8843 if (!intel_compare_link_m_n(¤t_config->name, \
8844 &pipe_config->name, !fastset) && \
8845 !intel_compare_link_m_n(¤t_config->alt_name, \
8846 &pipe_config->name, !fastset)) { \
8847 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8848 "(expected tu %i gmch %i/%i link %i/%i, " \
8849 "or tu %i gmch %i/%i link %i/%i, " \
8850 "found tu %i, gmch %i/%i link %i/%i)", \
8851 current_config->name.tu, \
8852 current_config->name.gmch_m, \
8853 current_config->name.gmch_n, \
8854 current_config->name.link_m, \
8855 current_config->name.link_n, \
8856 current_config->alt_name.tu, \
8857 current_config->alt_name.gmch_m, \
8858 current_config->alt_name.gmch_n, \
8859 current_config->alt_name.link_m, \
8860 current_config->alt_name.link_n, \
8861 pipe_config->name.tu, \
8862 pipe_config->name.gmch_m, \
8863 pipe_config->name.gmch_n, \
8864 pipe_config->name.link_m, \
8865 pipe_config->name.link_n); \
8870 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8871 if ((current_config->name ^ pipe_config->name) & (mask)) { \
8872 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8873 "(%x) (expected %i, found %i)", \
8875 current_config->name & (mask), \
8876 pipe_config->name & (mask)); \
8881 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8882 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8883 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8884 "(expected %i, found %i)", \
8885 current_config->name, \
8886 pipe_config->name); \
8891 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8892 if (!intel_compare_infoframe(¤t_config->infoframes.name, \
8893 &pipe_config->infoframes.name)) { \
8894 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8895 ¤t_config->infoframes.name, \
8896 &pipe_config->infoframes.name); \
8901 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8902 if (!current_config->has_psr && !pipe_config->has_psr && \
8903 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \
8904 &pipe_config->infoframes.name)) { \
8905 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8906 ¤t_config->infoframes.name, \
8907 &pipe_config->infoframes.name); \
8912 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8913 if (current_config->name1 != pipe_config->name1) { \
8914 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8915 "(expected %i, found %i, won't compare lut values)", \
8916 current_config->name1, \
8917 pipe_config->name1); \
8920 if (!intel_color_lut_equal(current_config->name2, \
8921 pipe_config->name2, pipe_config->name1, \
8923 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8924 "hw_state doesn't match sw_state"); \
8930 #define PIPE_CONF_QUIRK(quirk) \
8931 ((current_config->quirks | pipe_config->quirks) & (quirk))
8933 PIPE_CONF_CHECK_I(cpu_transcoder);
8935 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8936 PIPE_CONF_CHECK_I(fdi_lanes);
8937 PIPE_CONF_CHECK_M_N(fdi_m_n);
8939 PIPE_CONF_CHECK_I(lane_count);
8940 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8942 if (INTEL_GEN(dev_priv) < 8) {
8943 PIPE_CONF_CHECK_M_N(dp_m_n);
8945 if (current_config->has_drrs)
8946 PIPE_CONF_CHECK_M_N(dp_m2_n2);
8948 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8950 PIPE_CONF_CHECK_X(output_types);
8952 /* FIXME do the readout properly and get rid of this quirk */
8953 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8954 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8955 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8956 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8957 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8958 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8959 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8961 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8962 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8963 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8964 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8965 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8966 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8968 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8969 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8970 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8971 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8972 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8973 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8975 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8976 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8977 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8978 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8979 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8980 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8982 PIPE_CONF_CHECK_I(pixel_multiplier);
8984 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8985 DRM_MODE_FLAG_INTERLACE);
8987 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8988 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8989 DRM_MODE_FLAG_PHSYNC);
8990 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8991 DRM_MODE_FLAG_NHSYNC);
8992 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8993 DRM_MODE_FLAG_PVSYNC);
8994 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8995 DRM_MODE_FLAG_NVSYNC);
8999 PIPE_CONF_CHECK_I(output_format);
9000 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
9001 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
9002 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
9003 PIPE_CONF_CHECK_BOOL(limited_color_range);
9005 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
9006 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
9007 PIPE_CONF_CHECK_BOOL(has_infoframe);
9008 /* FIXME do the readout properly and get rid of this quirk */
9009 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9010 PIPE_CONF_CHECK_BOOL(fec_enable);
9012 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
9014 PIPE_CONF_CHECK_X(gmch_pfit.control);
9015 /* pfit ratios are autocomputed by the hw on gen4+ */
9016 if (INTEL_GEN(dev_priv) < 4)
9017 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
9018 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9021 * Changing the EDP transcoder input mux
9022 * (A_ONOFF vs. A_ON) requires a full modeset.
9024 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
9027 PIPE_CONF_CHECK_I(pipe_src_w);
9028 PIPE_CONF_CHECK_I(pipe_src_h);
9030 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
9031 if (current_config->pch_pfit.enabled) {
9032 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
9033 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
9034 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
9035 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
9038 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
9039 /* FIXME do the readout properly and get rid of this quirk */
9040 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9041 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
9043 PIPE_CONF_CHECK_X(gamma_mode);
9044 if (IS_CHERRYVIEW(dev_priv))
9045 PIPE_CONF_CHECK_X(cgm_mode);
9047 PIPE_CONF_CHECK_X(csc_mode);
9048 PIPE_CONF_CHECK_BOOL(gamma_enable);
9049 PIPE_CONF_CHECK_BOOL(csc_enable);
9051 PIPE_CONF_CHECK_I(linetime);
9052 PIPE_CONF_CHECK_I(ips_linetime);
9054 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
9056 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
9059 PIPE_CONF_CHECK_BOOL(double_wide);
9061 PIPE_CONF_CHECK_P(shared_dpll);
9063 /* FIXME do the readout properly and get rid of this quirk */
9064 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
9065 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
9066 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
9067 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
9068 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
9069 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
9070 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
9071 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
9072 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
9073 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
9074 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
9075 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
9076 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
9077 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
9078 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
9079 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
9080 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
9081 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
9082 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
9083 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
9084 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
9085 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
9086 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
9087 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
9088 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
9089 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
9090 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
9091 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
9092 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
9093 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
9094 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
9095 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
9097 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
9098 PIPE_CONF_CHECK_X(dsi_pll.div);
9100 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
9101 PIPE_CONF_CHECK_I(pipe_bpp);
9103 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
9104 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
9105 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9107 PIPE_CONF_CHECK_I(min_voltage_level);
9110 PIPE_CONF_CHECK_X(infoframes.enable);
9111 PIPE_CONF_CHECK_X(infoframes.gcp);
9112 PIPE_CONF_CHECK_INFOFRAME(avi);
9113 PIPE_CONF_CHECK_INFOFRAME(spd);
9114 PIPE_CONF_CHECK_INFOFRAME(hdmi);
9115 PIPE_CONF_CHECK_INFOFRAME(drm);
9116 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
9118 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
9119 PIPE_CONF_CHECK_I(master_transcoder);
9120 PIPE_CONF_CHECK_BOOL(bigjoiner);
9121 PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
9122 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
9124 PIPE_CONF_CHECK_I(dsc.compression_enable);
9125 PIPE_CONF_CHECK_I(dsc.dsc_split);
9126 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
9128 PIPE_CONF_CHECK_BOOL(splitter.enable);
9129 PIPE_CONF_CHECK_I(splitter.link_count);
9130 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
9132 PIPE_CONF_CHECK_I(mst_master_transcoder);
9134 PIPE_CONF_CHECK_BOOL(vrr.enable);
9135 PIPE_CONF_CHECK_I(vrr.vmin);
9136 PIPE_CONF_CHECK_I(vrr.vmax);
9137 PIPE_CONF_CHECK_I(vrr.flipline);
9138 PIPE_CONF_CHECK_I(vrr.pipeline_full);
9140 #undef PIPE_CONF_CHECK_X
9141 #undef PIPE_CONF_CHECK_I
9142 #undef PIPE_CONF_CHECK_BOOL
9143 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
9144 #undef PIPE_CONF_CHECK_P
9145 #undef PIPE_CONF_CHECK_FLAGS
9146 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
9147 #undef PIPE_CONF_CHECK_COLOR_LUT
9148 #undef PIPE_CONF_QUIRK
/*
 * Sanity check the FDI-derived dotclock against the dotclock the encoder
 * reported during readout. Only meaningful for pipes driving a PCH encoder;
 * a mismatch is reported via drm_WARN but does not abort anything.
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		drm_WARN(&dev_priv->drm,
			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
			 fdi_dotclock, dotclock);
	}
}
/*
 * Cross-check the software SKL+ watermark and DDB allocation state against
 * what the hardware actually has programmed, logging any mismatch with
 * drm_err(). No-op on pre-gen9 platforms or inactive pipes.
 *
 * The hardware snapshot is heap-allocated (it is too large for the stack)
 * and freed before returning; on allocation failure the check is silently
 * skipped.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* scratch buffer for the hardware state readout */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	/* Snapshot current hardware watermark/DDB programming. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks: compare each enabled WM level. */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* Transition watermark. */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB allocation. */
		hw_ddb_entry = &hw->ddb_y[plane->id];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
/*
 * Verify that every connector in @state ending up on @crtc has a consistent
 * atomic encoder/crtc linkage. @crtc may be NULL when verifying disabled
 * state (see intel_modeset_verify_disabled()), in which case only
 * connectors with no crtc are checked.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		/* Only look at connectors assigned to (or detached like) @crtc. */
		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
/*
 * Verify every encoder touched by @state: its enabled/crtc bookkeeping must
 * agree with the connectors referencing it, and a detached encoder must
 * actually be off in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			/* encoder was referenced before this commit */
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			/* encoder is referenced after this commit */
			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoder not part of this commit at all: nothing to verify. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* Detached encoder must be off in hardware too. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
/*
 * Read the pipe configuration back from hardware and compare it against the
 * just-committed software state, warning on every mismatch.
 *
 * NOTE: @old_crtc_state is destructively reused as scratch space for the
 * hardware readout (it is no longer needed after the commit); it is reset
 * before the readout and its uapi.state pointer is preserved.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/* Recycle the old state as a clean buffer for the hw readout. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* For a bigjoiner slave the encoders hang off the master crtc. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
/*
 * Assert that every plane in @state is enabled iff it should be visible
 * (planar-YUV slave planes count as visible for this purpose).
 */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}
/*
 * Verify the software tracking of one shared DPLL against hardware.
 *
 * With @crtc/@new_crtc_state NULL (disabled-DPLL pass) only the global
 * bookkeeping is checked; otherwise the pll's active/reference masks are
 * also checked against the given crtc, and the cached hw state is compared
 * with the freshly read-out one.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful on/off tracking to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Disabled-state pass: only global consistency to check. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
9475 verify_shared_dpll_state(struct intel_crtc *crtc,
9476 struct intel_crtc_state *old_crtc_state,
9477 struct intel_crtc_state *new_crtc_state)
9479 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9481 if (new_crtc_state->shared_dpll)
9482 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
9484 if (old_crtc_state->shared_dpll &&
9485 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
9486 u8 pipe_mask = BIT(crtc->pipe);
9487 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
9489 I915_STATE_WARN(pll->active_mask & pipe_mask,
9490 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
9491 pipe_name(crtc->pipe), pll->active_mask);
9492 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
9493 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
9494 pipe_name(crtc->pipe), pll->state.pipe_mask);
/*
 * Run the full set of post-commit state verifiers for @crtc, but only when
 * a full modeset or a fastset (update_pipe) actually touched it.
 */
static void
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
		return;

	verify_wm_state(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}
/*
 * Verify the global bookkeeping of every shared DPLL without reference to
 * any particular crtc (crtc/state passed as NULL).
 */
static void
verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv,
					 &dev_priv->dpll.shared_dplls[i],
					 NULL, NULL);
}
/*
 * Verify the parts of the modeset state that are not tied to any enabled
 * crtc: encoders, detached connectors and unused DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
/*
 * Update the crtc's vblank timestamping constants and scanline counter
 * offset from the committed timings. With VRR enabled the vertical timings
 * are adjusted to the vmax-based values the hardware will actually use.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* local copy so VRR adjustments don't touch the committed state */
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
9595 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9597 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9598 struct intel_crtc_state *new_crtc_state;
9599 struct intel_crtc *crtc;
9602 if (!dev_priv->display.crtc_compute_clock)
9605 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9606 if (!intel_crtc_needs_modeset(new_crtc_state))
9609 intel_release_shared_dplls(state, crtc);
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/* Record which pipe the newly enabled pipe(s) must wait on. */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
/*
 * Fold the new per-crtc active states from @state into @active_pipes and
 * return the updated bitmask. Pipes not touched by @state keep their bit.
 */
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}
/*
 * Global checks that run once we know the commit involves a full modeset.
 * On Haswell this also applies the multi-pipe plane-enable workaround.
 * Returns 0 or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}
/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 *
 * Returns 0, or a negative error code from the platform hook.
 */
static int calc_watermark_data(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate? */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}
/*
 * Downgrade a requested full modeset to a fastset when the old and new
 * states are equivalent under the fuzzy ("fastset") comparison.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;
}
/*
 * Carry over state that a fastset must not reprogram from the old crtc
 * state into the new one.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
/*
 * Pull every plane of @crtc whose id is in @plane_ids_mask into @state.
 * Returns 0, or a negative error code if acquiring a plane state fails.
 */
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
					  struct intel_crtc *crtc,
					  u8 plane_ids_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if ((plane_ids_mask & BIT(plane->id)) == 0)
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
/*
 * Add to @state every plane that is enabled on @crtc in either the old or
 * the new crtc state. Returns 0 or a negative error code.
 */
int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	return intel_crtc_add_planes_to_state(state, crtc,
					      old_crtc_state->enabled_planes |
					      new_crtc_state->enabled_planes);
}
9781 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9783 /* See {hsw,vlv,ivb}_plane_ratio() */
9784 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9785 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9786 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
/*
 * Mirror the set of planes already in @state for @crtc onto the linked
 * bigjoiner crtc @other (same plane ids, other pipe).
 * Returns 0 or a negative error code.
 */
static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
					   struct intel_crtc *crtc,
					   struct intel_crtc *other)
{
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	u8 plane_ids = 0;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			plane_ids |= BIT(plane->id);
	}

	return intel_crtc_add_planes_to_state(state, other, plane_ids);
}
/*
 * For every bigjoiner master crtc in @state, add the corresponding planes
 * of its linked (slave) crtc to the state as well.
 * Returns 0 or a negative error code.
 */
static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
						      crtc_state->bigjoiner_linked_crtc);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Run the per-plane atomic checks and pull in any extra planes whose state
 * is needed (linked NV12 Y planes, bigjoiner slave planes, and planes whose
 * min-cdclk contribution may change). Returns 0 or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* Cursor is excluded from the plane-count based ratios. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
9890 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9891 bool *need_cdclk_calc)
9893 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9894 const struct intel_cdclk_state *old_cdclk_state;
9895 const struct intel_cdclk_state *new_cdclk_state;
9896 struct intel_plane_state *plane_state;
9897 struct intel_bw_state *new_bw_state;
9898 struct intel_plane *plane;
9904 * active_planes bitmask has been updated, and potentially
9905 * affected planes are part of the state. We can now
9906 * compute the minimum cdclk for each plane.
9908 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9909 ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9914 old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9915 new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9917 if (new_cdclk_state &&
9918 old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9919 *need_cdclk_calc = true;
9921 ret = dev_priv->display.bw_calc_min_cdclk(state);
9925 new_bw_state = intel_atomic_get_new_bw_state(state);
9927 if (!new_cdclk_state || !new_bw_state)
9930 for_each_pipe(dev_priv, pipe) {
9931 min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9934 * Currently do this change only if we need to increase
9936 if (new_bw_state->min_cdclk > min_cdclk)
9937 *need_cdclk_calc = true;
/*
 * Run the per-crtc atomic check for every crtc in @state, logging and
 * returning the first failure. Returns 0 on success.
 */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}
/*
 * Return true if any enabled crtc in @state using one of the cpu
 * transcoders in the @transcoders bitmask needs a full modeset.
 */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}
/*
 * Validate and set up the bigjoiner master/slave crtc pairing for @crtc.
 * A master claims the next pipe (crtc->pipe + 1) as its slave; fails with
 * -EINVAL if that pipe doesn't exist or is already in use as a normal crtc,
 * or if a slave's master stopped claiming it without a modeset.
 * Returns 0 or a negative error code.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/* slave being enabled, is master is still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* The slave is always pipe + 1; it must exist on this platform. */
	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	slave = new_crtc_state->bigjoiner_linked_crtc =
		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}
/*
 * Tear down the bigjoiner link on both the master and its slave crtc state,
 * and rebuild the slave's hw state from its own uapi state so it behaves as
 * a normal, independent crtc again.
 */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc_state *master_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);

	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
}
10047 * DOC: asynchronous flip implementation
10049 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
10050 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
10051 * Correspondingly, support is currently added for primary plane only.
10053 * Async flip can only change the plane surface address, so anything else
10054 * changing is rejected from the intel_atomic_check_async() function.
10055 * Once this check is cleared, flip done interrupt is enabled using
10056 * the intel_crtc_enable_flip_done() function.
10058 * As soon as the surface address register is written, flip done interrupt is
10059 * generated and the requested events are sent to the userspace in the interrupt
10060 * handler itself. The timestamp and sequence sent during the flip done event
10061 * correspond to the last vblank and have no relation to the actual time when
10062 * the flip done event was sent.
/*
 * Validate that this commit qualifies as an async flip: the CRTC must stay
 * active with no modeset and an unchanged set of active planes, and for each
 * async-capable plane only the surface address may change — tiling modifier,
 * stride, pixel format, rotation, size/position, alpha, blend mode and color
 * properties must all remain identical between old and new state.
 * Returns 0 if the async flip is acceptable, a negative error code otherwise.
 */
10064 static int intel_atomic_check_async(struct intel_atomic_state *state)
10066 struct drm_i915_private *i915 = to_i915(state->base.dev);
10067 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10068 const struct intel_plane_state *new_plane_state, *old_plane_state;
10069 struct intel_crtc *crtc;
10070 struct intel_plane *plane;
10073 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10074 new_crtc_state, i) {
10075 if (intel_crtc_needs_modeset(new_crtc_state)) {
10076 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
10080 if (!new_crtc_state->hw.active) {
10081 drm_dbg_kms(&i915->drm, "CRTC inactive\n");
10084 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
10085 drm_dbg_kms(&i915->drm,
10086 "Active planes cannot be changed during async flip\n");
10091 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10092 new_plane_state, i) {
10094 * TODO: Async flip is only supported through the page flip IOCTL
10095 * as of now. So support currently added for primary plane only.
10096 * Support for other planes on platforms on which supports
10097 * this(vlv/chv and icl+) should be added when async flip is
10098 * enabled in the atomic IOCTL path.
10100 if (!plane->async_flip)
10104 * FIXME: This check is kept generic for all platforms.
10105 * Need to verify this for all gen9 and gen10 platforms to enable
10106 * this selectively if required.
/* Only tiled surfaces support async flips; linear/CCS fall through to the reject path. */
10108 switch (new_plane_state->hw.fb->modifier) {
10109 case I915_FORMAT_MOD_X_TILED:
10110 case I915_FORMAT_MOD_Y_TILED:
10111 case I915_FORMAT_MOD_Yf_TILED:
10114 drm_dbg_kms(&i915->drm,
10115 "Linear memory/CCS does not support async flips\n");
10119 if (old_plane_state->color_plane[0].stride !=
10120 new_plane_state->color_plane[0].stride) {
10121 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
10125 if (old_plane_state->hw.fb->modifier !=
10126 new_plane_state->hw.fb->modifier) {
10127 drm_dbg_kms(&i915->drm,
10128 "Framebuffer modifiers cannot be changed in async flip\n");
10132 if (old_plane_state->hw.fb->format !=
10133 new_plane_state->hw.fb->format) {
10134 drm_dbg_kms(&i915->drm,
10135 "Framebuffer format cannot be changed in async flip\n");
10139 if (old_plane_state->hw.rotation !=
10140 new_plane_state->hw.rotation) {
10141 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
10145 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
10146 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
10147 drm_dbg_kms(&i915->drm,
10148 "Plane size/co-ordinates cannot be changed in async flip\n");
10152 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
10153 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
10157 if (old_plane_state->hw.pixel_blend_mode !=
10158 new_plane_state->hw.pixel_blend_mode) {
10159 drm_dbg_kms(&i915->drm,
10160 "Pixel blend mode cannot be changed in async flip\n");
10164 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
10165 drm_dbg_kms(&i915->drm,
10166 "Color encoding cannot be changed in async flip\n");
10170 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
10171 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
/*
 * For every bigjoiner CRTC in @state, pull its linked CRTC's state into the
 * atomic state too; if the CRTC is being modeset, force the linked CRTC to
 * modeset as well and add its connectors and planes. Finally, kill any stale
 * bigjoiner master link on CRTCs that are being modeset — the link may be
 * re-established later during the modeset computation.
 * Returns 0 on success or a negative error code.
 */
10179 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
10181 struct intel_crtc_state *crtc_state;
10182 struct intel_crtc *crtc;
10185 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10186 struct intel_crtc_state *linked_crtc_state;
10187 struct intel_crtc *linked_crtc;
10190 if (!crtc_state->bigjoiner)
10193 linked_crtc = crtc_state->bigjoiner_linked_crtc;
10194 linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
10195 if (IS_ERR(linked_crtc_state))
10196 return PTR_ERR(linked_crtc_state);
10198 if (!intel_crtc_needs_modeset(crtc_state))
10201 linked_crtc_state->uapi.mode_changed = true;
10203 ret = drm_atomic_add_affected_connectors(&state->base,
10204 &linked_crtc->base);
10208 ret = intel_atomic_add_affected_planes(state, linked_crtc);
/* Second pass: only after all linked states are in @state is it safe to unlink. */
10213 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10214 /* Kill old bigjoiner link, we may re-establish afterwards */
10215 if (intel_crtc_needs_modeset(crtc_state) &&
10216 crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
10217 kill_bigjoiner_slave(state, crtc_state);
10224 * intel_atomic_check - validate state object
 * @dev: drm device
10226 * @_state: state to validate
10228 static int intel_atomic_check(struct drm_device *dev,
10229 struct drm_atomic_state *_state)
10231 struct drm_i915_private *dev_priv = to_i915(dev);
10232 struct intel_atomic_state *state = to_intel_atomic_state(_state);
10233 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10234 struct intel_crtc *crtc;
10236 bool any_ms = false;
/* A change in the BIOS-inherited flag forces a full modeset. */
10238 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10239 new_crtc_state, i) {
10240 if (new_crtc_state->inherited != old_crtc_state->inherited)
10241 new_crtc_state->uapi.mode_changed = true;
10244 intel_vrr_check_modeset(state);
10246 ret = drm_atomic_helper_check_modeset(dev, &state->base);
10250 ret = intel_bigjoiner_add_affected_crtcs(state);
/* Compute the new pipe config for every CRTC that needs a modeset. */
10254 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10255 new_crtc_state, i) {
10256 if (!intel_crtc_needs_modeset(new_crtc_state)) {
10258 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
10263 if (!new_crtc_state->uapi.enable) {
10264 if (!new_crtc_state->bigjoiner_slave) {
10265 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
10271 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
10275 ret = intel_modeset_pipe_config(state, new_crtc_state);
10279 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
/* Late config and fastset eligibility per CRTC. */
10285 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10286 new_crtc_state, i) {
10287 if (!intel_crtc_needs_modeset(new_crtc_state))
10290 ret = intel_modeset_pipe_config_late(new_crtc_state);
10294 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
10298 * Check if fastset is allowed by external dependencies like other
10299 * pipes and transcoders.
10301 * Right now it only forces a fullmodeset when the MST master
10302 * transcoder did not change but the pipe of the master transcoder
10303 * needs a fullmodeset so all slaves also need to do a fullmodeset, or
10304 * in case of port synced crtcs, if one of the synced crtcs
10305 * needs a full modeset, all other synced crtcs should be
10306 * forced a full modeset.
10308 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10309 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
10312 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
10313 enum transcoder master = new_crtc_state->mst_master_transcoder;
10315 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
10316 new_crtc_state->uapi.mode_changed = true;
10317 new_crtc_state->update_pipe = false;
10321 if (is_trans_port_sync_mode(new_crtc_state)) {
10322 u8 trans = new_crtc_state->sync_mode_slaves_mask;
10324 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
10325 trans |= BIT(new_crtc_state->master_transcoder);
10327 if (intel_cpu_transcoders_need_modeset(state, trans)) {
10328 new_crtc_state->uapi.mode_changed = true;
10329 new_crtc_state->update_pipe = false;
10333 if (new_crtc_state->bigjoiner) {
10334 struct intel_crtc_state *linked_crtc_state =
10335 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
10337 if (intel_crtc_needs_modeset(linked_crtc_state)) {
10338 new_crtc_state->uapi.mode_changed = true;
10339 new_crtc_state->update_pipe = false;
10344 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10345 new_crtc_state, i) {
10346 if (intel_crtc_needs_modeset(new_crtc_state)) {
10351 if (!new_crtc_state->update_pipe)
10354 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
10357 if (any_ms && !check_digital_port_conflicts(state)) {
10358 drm_dbg_kms(&dev_priv->drm,
10359 "rejecting conflicting digital port configuration\n");
10364 ret = drm_dp_mst_atomic_check(&state->base);
10368 ret = intel_atomic_check_planes(state);
10372 intel_fbc_choose_crtc(dev_priv, state);
10373 ret = calc_watermark_data(state);
10377 ret = intel_bw_atomic_check(state);
10381 ret = intel_atomic_check_cdclk(state, &any_ms);
10386 ret = intel_modeset_checks(state);
10390 ret = intel_modeset_calc_cdclk(state);
10394 intel_modeset_clear_plls(state);
10397 ret = intel_atomic_check_crtcs(state);
/* Final pass: async-flip validation and debug dump of each changed config. */
10401 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10402 new_crtc_state, i) {
10403 if (new_crtc_state->uapi.async_flip) {
10404 ret = intel_atomic_check_async(state);
10409 if (!intel_crtc_needs_modeset(new_crtc_state) &&
10410 !new_crtc_state->update_pipe)
10413 intel_dump_pipe_config(new_crtc_state, state,
10414 intel_crtc_needs_modeset(new_crtc_state) ?
10415 "[modeset]" : "[fastset]");
10421 if (ret == -EDEADLK)
10425 * FIXME would probably be nice to know which crtc specifically
10426 * caused the failure, in cases where we can pinpoint it.
10428 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10430 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
/*
 * Prepare the commit: pin/prepare all planes via the DRM helper, then
 * build a DSB (display state buffer) for every CRTC that needs pipe or
 * color-management programming so those register writes can be batched.
 * Returns 0 on success or a negative error code from plane preparation.
 */
10435 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10437 struct intel_crtc_state *crtc_state;
10438 struct intel_crtc *crtc;
10441 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10445 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10446 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10448 if (mode_changed || crtc_state->update_pipe ||
10449 crtc_state->uapi.color_mgmt_changed) {
10450 intel_dsb_prepare(crtc_state);
/*
 * Enable CPU (and, for PCH-attached pipes, PCH) FIFO underrun reporting
 * for @crtc. On gen2 reporting is only armed when at least one plane is
 * active, since those parts report underruns whenever all planes are off.
 */
10457 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10458 struct intel_crtc_state *crtc_state)
10460 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10462 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
10463 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10465 if (crtc_state->has_pch_encoder) {
10466 enum pipe pch_transcoder =
10467 intel_crtc_pch_transcoder(crtc);
10469 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
/*
 * Apply the pipe-level pieces of a fastset (no full modeset): update the
 * pipe source size, re-program or disable the panel fitter per-platform,
 * and refresh the linetime watermark and pipe chicken registers where the
 * hardware generation requires it.
 */
10473 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
10474 const struct intel_crtc_state *new_crtc_state)
10476 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
10477 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10480 * Update pipe size and adjust fitter if needed: the reason for this is
10481 * that in compute_mode_changes we check the native mode (not the pfit
10482 * mode) to see if we can flip rather than do a full mode set. In the
10483 * fastboot case, we'll flip, but if we don't update the pipesrc and
10484 * pfit state, we'll end up with a big fb scanned out into the wrong
10487 intel_set_pipe_src_size(new_crtc_state);
10489 /* on skylake this is done by detaching scalers */
10490 if (INTEL_GEN(dev_priv) >= 9) {
10491 skl_detach_scalers(new_crtc_state);
10493 if (new_crtc_state->pch_pfit.enabled)
10494 skl_pfit_enable(new_crtc_state);
10495 } else if (HAS_PCH_SPLIT(dev_priv)) {
10496 if (new_crtc_state->pch_pfit.enabled)
10497 ilk_pfit_enable(new_crtc_state);
10498 else if (old_crtc_state->pch_pfit.enabled)
10499 ilk_pfit_disable(old_crtc_state);
10503 * The register is supposedly single buffered so perhaps
10504 * not 100% correct to do this here. But SKL+ calculate
10505 * this based on the adjust pixel rate so pfit changes do
10506 * affect it and so it must be updated for fastsets.
10507 * HSW/BDW only really need this here for fastboot, after
10508 * that the value should not change without a full modeset.
10510 if (INTEL_GEN(dev_priv) >= 9 ||
10511 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
10512 hsw_set_linetime_wm(new_crtc_state);
10514 if (INTEL_GEN(dev_priv) >= 11)
10515 icl_set_pipe_chicken(crtc);
/*
 * Commit per-pipe register state during the vblank-evaded update window:
 * color management, scaler detach (gen9+), PIPEMISC (BDW+), the fastset
 * pipe updates, PSR2 manual tracking, and finally the per-platform
 * watermark hook. Modeset programming itself happened at CRTC enable time.
 */
10518 static void commit_pipe_config(struct intel_atomic_state *state,
10519 struct intel_crtc *crtc)
10521 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10522 const struct intel_crtc_state *old_crtc_state =
10523 intel_atomic_get_old_crtc_state(state, crtc);
10524 const struct intel_crtc_state *new_crtc_state =
10525 intel_atomic_get_new_crtc_state(state, crtc);
10526 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10529 * During modesets pipe configuration was programmed as the
10530 * CRTC was enabled.
10533 if (new_crtc_state->uapi.color_mgmt_changed ||
10534 new_crtc_state->update_pipe)
10535 intel_color_commit(new_crtc_state);
10537 if (INTEL_GEN(dev_priv) >= 9)
10538 skl_detach_scalers(new_crtc_state);
10540 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
10541 bdw_set_pipemisc(new_crtc_state);
10543 if (new_crtc_state->update_pipe)
10544 intel_pipe_fastset(old_crtc_state, new_crtc_state);
10546 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
10549 if (dev_priv->display.atomic_update_watermarks)
10550 dev_priv->display.atomic_update_watermarks(state, crtc);
/*
 * Enable a CRTC that is undergoing a full modeset: program the active
 * timings, run the platform crtc_enable hook, and (for non-bigjoiner-slave
 * pipes) re-enable pipe CRC capture now that vblanks work again.
 */
10553 static void intel_enable_crtc(struct intel_atomic_state *state,
10554 struct intel_crtc *crtc)
10556 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10557 const struct intel_crtc_state *new_crtc_state =
10558 intel_atomic_get_new_crtc_state(state, crtc);
10560 if (!intel_crtc_needs_modeset(new_crtc_state))
10563 intel_crtc_update_active_timings(new_crtc_state);
10565 dev_priv->display.crtc_enable(state, crtc);
10567 if (new_crtc_state->bigjoiner_slave)
10570 /* vblanks work again, re-enable pipe CRC. */
10571 intel_crtc_enable_pipe_crc(crtc);
/*
 * Perform the plane/pipe update for one CRTC: preload LUTs if possible,
 * run pre-plane updates, handle FBC enable/disable, then program the
 * pipe config and planes inside the vblank-evasion window. On a first
 * fastset of a BIOS-inherited config this also arms FIFO underruns.
 */
10574 static void intel_update_crtc(struct intel_atomic_state *state,
10575 struct intel_crtc *crtc)
10577 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10578 const struct intel_crtc_state *old_crtc_state =
10579 intel_atomic_get_old_crtc_state(state, crtc);
10580 struct intel_crtc_state *new_crtc_state =
10581 intel_atomic_get_new_crtc_state(state, crtc);
10582 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10585 if (new_crtc_state->preload_luts &&
10586 (new_crtc_state->uapi.color_mgmt_changed ||
10587 new_crtc_state->update_pipe))
10588 intel_color_load_luts(new_crtc_state);
10590 intel_pre_plane_update(state, crtc);
10592 if (new_crtc_state->update_pipe)
10593 intel_encoders_update_pipe(state, crtc);
10596 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
10597 intel_fbc_disable(crtc);
10599 intel_fbc_enable(state, crtc);
10601 /* Perform vblank evasion around commit operation */
10602 intel_pipe_update_start(new_crtc_state);
10604 commit_pipe_config(state, crtc);
10606 if (INTEL_GEN(dev_priv) >= 9)
10607 skl_update_planes_on_crtc(state, crtc);
10609 i9xx_update_planes_on_crtc(state, crtc);
10611 intel_pipe_update_end(new_crtc_state);
10614 * We usually enable FIFO underrun interrupts as part of the
10615 * CRTC enable sequence during modesets. But when we inherit a
10616 * valid pipe configuration from the BIOS we need to take care
10617 * of enabling them on the CRTC's first fastset.
10619 if (new_crtc_state->update_pipe && !modeset &&
10620 old_crtc_state->inherited)
10621 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * Disable a CRTC as part of a modeset: turn off its planes (and its
 * bigjoiner slave's planes, if any), stop pipe CRC before the pipe goes
 * down, run the platform crtc_disable hook, disable FBC and the shared
 * DPLL, and finally program initial watermarks on non-GMCH platforms.
 * Must never be called on a bigjoiner slave directly.
 */
10624 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
10625 struct intel_crtc_state *old_crtc_state,
10626 struct intel_crtc_state *new_crtc_state,
10627 struct intel_crtc *crtc)
10629 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10631 drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
10633 intel_crtc_disable_planes(state, crtc);
10636 * We still need special handling for disabling bigjoiner master
10637 * and slaves since for slave we do not have encoder or plls
10638 * so we don't need to disable those.
10640 if (old_crtc_state->bigjoiner) {
10641 intel_crtc_disable_planes(state,
10642 old_crtc_state->bigjoiner_linked_crtc);
10643 old_crtc_state->bigjoiner_linked_crtc->active = false;
10647 * We need to disable pipe CRC before disabling the pipe,
10648 * or we race against vblank off.
10650 intel_crtc_disable_pipe_crc(crtc);
10652 dev_priv->display.crtc_disable(state, crtc);
10653 crtc->active = false;
10654 intel_fbc_disable(crtc);
10655 intel_disable_shared_dpll(old_crtc_state);
10657 /* FIXME unify this for all platforms */
10658 if (!new_crtc_state->hw.active &&
10659 !HAS_GMCH(dev_priv) &&
10660 dev_priv->display.initial_watermarks)
10661 dev_priv->display.initial_watermarks(state, crtc);
/*
 * Disable all CRTCs that need it for this commit, in dependency order:
 * first port-sync and MST slaves (their vblanks are masked until the
 * master's), then everything else, with bigjoiner pairs handled through
 * the master so the slave is never disabled on its own.
 */
10664 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
10666 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10667 struct intel_crtc *crtc;
10671 /* Only disable port sync and MST slaves */
10672 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10673 new_crtc_state, i) {
10674 if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
10677 if (!old_crtc_state->hw.active)
10680 /* In case of Transcoder port Sync master slave CRTCs can be
10681 * assigned in any order and we need to make sure that
10682 * slave CRTCs are disabled first and then master CRTC since
10683 * Slave vblanks are masked till Master Vblanks.
10685 if (!is_trans_port_sync_slave(old_crtc_state) &&
10686 !intel_dp_mst_is_slave_trans(old_crtc_state))
10689 intel_pre_plane_update(state, crtc);
10690 intel_old_crtc_state_disables(state, old_crtc_state,
10691 new_crtc_state, crtc);
10692 handled |= BIT(crtc->pipe);
10695 /* Disable everything else left on */
10696 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10697 new_crtc_state, i) {
10698 if (!intel_crtc_needs_modeset(new_crtc_state) ||
10699 (handled & BIT(crtc->pipe)) ||
10700 old_crtc_state->bigjoiner_slave)
10703 intel_pre_plane_update(state, crtc);
10704 if (old_crtc_state->bigjoiner) {
10705 struct intel_crtc *slave =
10706 old_crtc_state->bigjoiner_linked_crtc;
10708 intel_pre_plane_update(state, slave);
10711 if (old_crtc_state->hw.active)
10712 intel_old_crtc_state_disables(state, old_crtc_state,
10713 new_crtc_state, crtc);
/*
 * Default (non-skl) commit-enable path: enable and update every CRTC
 * that will be active, in state order — no DDB ordering constraints here.
 */
10717 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10719 struct intel_crtc_state *new_crtc_state;
10720 struct intel_crtc *crtc;
10723 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10724 if (!new_crtc_state->hw.active)
10727 intel_enable_crtc(state, crtc);
10728 intel_update_crtc(state, crtc);
/*
 * skl+ commit-enable path. Pipes must be updated in an order that keeps
 * their DDB (data buffer) allocations from ever overlapping between
 * updates, or the hardware underruns. Fastset pipes go first (repeatedly,
 * waiting a vblank when an active pipe's DDB shrinks), then modeset pipes
 * without dependencies, then dependent pipes (MST slaves, port-sync
 * masters, bigjoiner masters), and finally the plane updates for all
 * newly-enabled pipes.
 */
10732 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
10734 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10735 struct intel_crtc *crtc;
10736 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10737 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
10738 u8 update_pipes = 0, modeset_pipes = 0;
10741 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10742 enum pipe pipe = crtc->pipe;
10744 if (!new_crtc_state->hw.active)
10747 /* ignore allocations for crtc's that have been turned off. */
10748 if (!intel_crtc_needs_modeset(new_crtc_state)) {
10749 entries[pipe] = old_crtc_state->wm.skl.ddb;
10750 update_pipes |= BIT(pipe);
10752 modeset_pipes |= BIT(pipe);
10757 * Whenever the number of active pipes changes, we need to make sure we
10758 * update the pipes in the right order so that their ddb allocations
10759 * never overlap with each other between CRTC updates. Otherwise we'll
10760 * cause pipe underruns and other bad stuff.
10762 * So first lets enable all pipes that do not need a fullmodeset as
10763 * those don't have any external dependency.
10765 while (update_pipes) {
10766 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10767 new_crtc_state, i) {
10768 enum pipe pipe = crtc->pipe;
10770 if ((update_pipes & BIT(pipe)) == 0)
10773 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10774 entries, I915_MAX_PIPES, pipe))
10777 entries[pipe] = new_crtc_state->wm.skl.ddb;
10778 update_pipes &= ~BIT(pipe);
10780 intel_update_crtc(state, crtc);
10783 * If this is an already active pipe, it's DDB changed,
10784 * and this isn't the last pipe that needs updating
10785 * then we need to wait for a vblank to pass for the
10786 * new ddb allocation to take effect.
10788 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
10789 &old_crtc_state->wm.skl.ddb) &&
10790 (update_pipes | modeset_pipes))
10791 intel_wait_for_vblank(dev_priv, pipe);
10795 update_pipes = modeset_pipes;
10798 * Enable all pipes that needs a modeset and do not depends on other
10801 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10802 enum pipe pipe = crtc->pipe;
10804 if ((modeset_pipes & BIT(pipe)) == 0)
10807 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10808 is_trans_port_sync_master(new_crtc_state) ||
10809 (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10812 modeset_pipes &= ~BIT(pipe);
10814 intel_enable_crtc(state, crtc);
10818 * Then we enable all remaining pipes that depend on other
10819 * pipes: MST slaves and port sync masters, big joiner master
10821 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10822 enum pipe pipe = crtc->pipe;
10824 if ((modeset_pipes & BIT(pipe)) == 0)
10827 modeset_pipes &= ~BIT(pipe);
10829 intel_enable_crtc(state, crtc);
10833 * Finally we do the plane updates/etc. for all pipes that got enabled.
10835 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10836 enum pipe pipe = crtc->pipe;
10838 if ((update_pipes & BIT(pipe)) == 0)
10841 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10842 entries, I915_MAX_PIPES, pipe));
10844 entries[pipe] = new_crtc_state->wm.skl.ddb;
10845 update_pipes &= ~BIT(pipe);
10847 intel_update_crtc(state, crtc);
/* Every pipe must have been handled by one of the passes above. */
10850 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10851 drm_WARN_ON(&dev_priv->drm, update_pipes);
/*
 * Drop the reference on every atomic state queued on the driver's
 * lock-free free_list (states parked there by the commit_ready fence
 * callback when they could not be freed in atomic context).
 */
10854 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10856 struct intel_atomic_state *state, *next;
10857 struct llist_node *freed;
10859 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10860 llist_for_each_entry_safe(state, next, freed, freed)
10861 drm_atomic_state_put(&state->base);
/* Work item that drains the deferred-free list in process context. */
10864 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10866 struct drm_i915_private *dev_priv =
10867 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10869 intel_atomic_helper_free_state(dev_priv);
/*
 * Block until the commit's i915_sw_fence signals, but also wake early if a
 * GPU reset requiring modeset takes over (I915_RESET_MODESET), by waiting
 * on both wait queues simultaneously. Uninterruptible by design — the
 * commit must proceed either way.
 */
10872 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10874 struct wait_queue_entry wait_fence, wait_reset;
10875 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10877 init_wait_entry(&wait_fence, 0);
10878 init_wait_entry(&wait_reset, 0);
10880 prepare_to_wait(&intel_state->commit_ready.wait,
10881 &wait_fence, TASK_UNINTERRUPTIBLE);
10882 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10883 I915_RESET_MODESET),
10884 &wait_reset, TASK_UNINTERRUPTIBLE);
10887 if (i915_sw_fence_done(&intel_state->commit_ready) ||
10888 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10893 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10894 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10895 I915_RESET_MODESET),
/*
 * Release the DSB buffers stashed on the old CRTC states (moved there
 * from the new states at the end of commit_tail) once the commit is done.
 */
10899 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10901 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10902 struct intel_crtc *crtc;
10905 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10907 intel_dsb_cleanup(old_crtc_state);
/*
 * Deferred post-commit cleanup (queued from commit_tail on a highpri
 * workqueue): free DSBs, clean up planes, signal cleanup-done, drop the
 * state reference, and drain the deferred-free list.
 */
10910 static void intel_atomic_cleanup_work(struct work_struct *work)
10912 struct intel_atomic_state *state =
10913 container_of(work, struct intel_atomic_state, base.commit_work);
10914 struct drm_i915_private *i915 = to_i915(state->base.dev);
10916 intel_cleanup_dsbs(state);
10917 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10918 drm_atomic_helper_commit_cleanup_done(&state->base);
10919 drm_atomic_state_put(&state->base);
10921 intel_atomic_helper_free_state(i915);
/*
 * For planes using the GEN12 render-compression-with-clear-color modifier,
 * read the 8-byte native clear color value out of the fb object (plane #2,
 * offset +16 past the per-channel values) into plane_state->ccval so it can
 * be programmed during the commit.
 */
10924 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10926 struct drm_i915_private *i915 = to_i915(state->base.dev);
10927 struct intel_plane *plane;
10928 struct intel_plane_state *plane_state;
10931 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10932 struct drm_framebuffer *fb = plane_state->hw.fb;
10936 fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10940 * The layout of the fast clear color value expected by HW
10941 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
10942 * - 4 x 4 bytes per-channel value
10943 * (in surface type specific float/int format provided by the fb user)
10944 * - 8 bytes native color value used by the display
10945 * (converted/written by GPU during a fast clear operation using the
10946 * above per-channel values)
10948 * The commit's FB prepare hook already ensured that FB obj is pinned and the
10949 * caller made sure that the object is synced wrt. the related color clear value
10952 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10953 fb->offsets[2] + 16,
10954 &plane_state->ccval,
10955 sizeof(plane_state->ccval));
10956 /* The above could only fail if the FB obj has an unexpected backing store type. */
10957 drm_WARN_ON(&i915->drm, ret);
/*
 * The hardware half of an atomic commit, run after all fences have
 * signalled: disable outgoing CRTCs, program cdclk/dbuf/SAGV around the
 * plane updates, enable incoming CRTCs, wait for flip done, then perform
 * post-vblank optimizations (watermarks, LUTs) and hand final cleanup to
 * a highpri worker.
 */
10961 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10963 struct drm_device *dev = state->base.dev;
10964 struct drm_i915_private *dev_priv = to_i915(dev);
10965 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10966 struct intel_crtc *crtc;
10967 u64 put_domains[I915_MAX_PIPES] = {};
10968 intel_wakeref_t wakeref = 0;
10971 intel_atomic_commit_fence_wait(state);
10973 drm_atomic_helper_wait_for_dependencies(&state->base);
10975 if (state->modeset)
10976 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10978 intel_atomic_prepare_plane_clear_colors(state);
/* Grab power domains for every pipe being modeset or fastset. */
10980 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10981 new_crtc_state, i) {
10982 if (intel_crtc_needs_modeset(new_crtc_state) ||
10983 new_crtc_state->update_pipe) {
10985 put_domains[crtc->pipe] =
10986 modeset_get_crtc_power_domains(new_crtc_state);
10990 intel_commit_modeset_disables(state);
10992 /* FIXME: Eventually get rid of our crtc->config pointer */
10993 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10994 crtc->config = new_crtc_state;
10996 if (state->modeset) {
10997 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10999 intel_set_cdclk_pre_plane_update(state);
11001 intel_modeset_verify_disabled(dev_priv, state);
11004 intel_sagv_pre_plane_update(state);
11006 /* Complete the events for pipes that have now been disabled */
11007 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11008 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
11010 /* Complete events for now disabled pipes here. */
11011 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
11012 spin_lock_irq(&dev->event_lock);
11013 drm_crtc_send_vblank_event(&crtc->base,
11014 new_crtc_state->uapi.event);
11015 spin_unlock_irq(&dev->event_lock);
11017 new_crtc_state->uapi.event = NULL;
11021 if (state->modeset)
11022 intel_encoders_update_prepare(state);
11024 intel_dbuf_pre_plane_update(state);
/* Arm flip-done interrupts for async flips before the surface writes. */
11026 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11027 if (new_crtc_state->uapi.async_flip)
11028 intel_crtc_enable_flip_done(state, crtc);
11031 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
11032 dev_priv->display.commit_modeset_enables(state);
11034 if (state->modeset) {
11035 intel_encoders_update_complete(state);
11037 intel_set_cdclk_post_plane_update(state);
11040 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
11041 * already, but still need the state for the delayed optimization. To
11043 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
11044 * - schedule that vblank worker _before_ calling hw_done
11045 * - at the start of commit_tail, cancel it _synchronously
11046 * - switch over to the vblank wait helper in the core after that since
11047 * we don't need out special handling any more.
11049 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
11051 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11052 if (new_crtc_state->uapi.async_flip)
11053 intel_crtc_disable_flip_done(state, crtc);
/* Deferred LUT load for configs that couldn't preload before vblank. */
11055 if (new_crtc_state->hw.active &&
11056 !intel_crtc_needs_modeset(new_crtc_state) &&
11057 !new_crtc_state->preload_luts &&
11058 (new_crtc_state->uapi.color_mgmt_changed ||
11059 new_crtc_state->update_pipe))
11060 intel_color_load_luts(new_crtc_state);
11064 * Now that the vblank has passed, we can go ahead and program the
11065 * optimal watermarks on platforms that need two-step watermark
11068 * TODO: Move this (and other cleanup) to an async worker eventually.
11070 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11071 new_crtc_state, i) {
11073 * Gen2 reports pipe underruns whenever all planes are disabled.
11074 * So re-enable underrun reporting after some planes get enabled.
11076 * We do this before .optimize_watermarks() so that we have a
11077 * chance of catching underruns with the intermediate watermarks
11078 * vs. the new plane configuration.
11080 if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
11081 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
11083 if (dev_priv->display.optimize_watermarks)
11084 dev_priv->display.optimize_watermarks(state, crtc);
11087 intel_dbuf_post_plane_update(state);
11089 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11090 intel_post_plane_update(state, crtc);
11092 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
11094 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
11097 * DSB cleanup is done in cleanup_work aligning with framebuffer
11098 * cleanup. So copy and reset the dsb structure to sync with
11099 * commit_done and later do dsb cleanup in cleanup_work.
11101 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
11104 /* Underruns don't always raise interrupts, so check manually */
11105 intel_check_cpu_fifo_underruns(dev_priv);
11106 intel_check_pch_fifo_underruns(dev_priv);
11108 if (state->modeset)
11109 intel_verify_planes(state);
11111 intel_sagv_post_plane_update(state);
11113 drm_atomic_helper_commit_hw_done(&state->base);
11115 if (state->modeset) {
11116 /* As one of the primary mmio accessors, KMS has a high
11117 * likelihood of triggering bugs in unclaimed access. After we
11118 * finish modesetting, see if an error has been flagged, and if
11119 * so enable debugging for the next modeset - and hope we catch
11122 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
11123 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
11125 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11128 * Defer the cleanup of the old state to a separate worker to not
11129 * impede the current task (userspace for blocking modesets) that
11130 * are executed inline. For out-of-line asynchronous modesets/flips,
11131 * deferring to a new worker seems overkill, but we would place a
11132 * schedule point (cond_resched()) here anyway to keep latencies
11135 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
11136 queue_work(system_highpri_wq, &state->base.commit_work);
/* Work-queue trampoline for nonblocking commits: runs commit_tail. */
11139 static void intel_atomic_commit_work(struct work_struct *work)
11141 struct intel_atomic_state *state =
11142 container_of(work, struct intel_atomic_state, base.commit_work);
11144 intel_atomic_commit_tail(state);
11147 static int __i915_sw_fence_call
11148 intel_atomic_commit_ready(struct i915_sw_fence *fence,
11149 enum i915_sw_fence_notify notify)
11151 struct intel_atomic_state *state =
11152 container_of(fence, struct intel_atomic_state, commit_ready);
11155 case FENCE_COMPLETE:
11156 /* we do blocking waits in the worker, nothing to do here */
11160 struct intel_atomic_helper *helper =
11161 &to_i915(state->base.dev)->atomic_helper;
11163 if (llist_add(&state->freed, &helper->free_list))
11164 schedule_work(&helper->free_work);
11169 return NOTIFY_DONE;
11172 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
11174 struct intel_plane_state *old_plane_state, *new_plane_state;
11175 struct intel_plane *plane;
11178 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
11179 new_plane_state, i)
11180 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
11181 to_intel_frontbuffer(new_plane_state->hw.fb),
11182 plane->frontbuffer_bit);
11185 static int intel_atomic_commit(struct drm_device *dev,
11186 struct drm_atomic_state *_state,
11189 struct intel_atomic_state *state = to_intel_atomic_state(_state);
11190 struct drm_i915_private *dev_priv = to_i915(dev);
11193 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
11195 drm_atomic_state_get(&state->base);
11196 i915_sw_fence_init(&state->commit_ready,
11197 intel_atomic_commit_ready);
11200 * The intel_legacy_cursor_update() fast path takes care
11201 * of avoiding the vblank waits for simple cursor
11202 * movement and flips. For cursor on/off and size changes,
11203 * we want to perform the vblank waits so that watermark
11204 * updates happen during the correct frames. Gen9+ have
11205 * double buffered watermarks and so shouldn't need this.
11207 * Unset state->legacy_cursor_update before the call to
11208 * drm_atomic_helper_setup_commit() because otherwise
11209 * drm_atomic_helper_wait_for_flip_done() is a noop and
11210 * we get FIFO underruns because we didn't wait
11213 * FIXME doing watermarks and fb cleanup from a vblank worker
11214 * (assuming we had any) would solve these problems.
11216 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
11217 struct intel_crtc_state *new_crtc_state;
11218 struct intel_crtc *crtc;
11221 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
11222 if (new_crtc_state->wm.need_postvbl_update ||
11223 new_crtc_state->update_wm_post)
11224 state->base.legacy_cursor_update = false;
11227 ret = intel_atomic_prepare_commit(state);
11229 drm_dbg_atomic(&dev_priv->drm,
11230 "Preparing state failed with %i\n", ret);
11231 i915_sw_fence_commit(&state->commit_ready);
11232 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11236 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
11238 ret = drm_atomic_helper_swap_state(&state->base, true);
11240 intel_atomic_swap_global_state(state);
11243 struct intel_crtc_state *new_crtc_state;
11244 struct intel_crtc *crtc;
11247 i915_sw_fence_commit(&state->commit_ready);
11249 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
11250 intel_dsb_cleanup(new_crtc_state);
11252 drm_atomic_helper_cleanup_planes(dev, &state->base);
11253 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11256 intel_shared_dpll_swap_state(state);
11257 intel_atomic_track_fbs(state);
11259 drm_atomic_state_get(&state->base);
11260 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
11262 i915_sw_fence_commit(&state->commit_ready);
11263 if (nonblock && state->modeset) {
11264 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
11265 } else if (nonblock) {
11266 queue_work(dev_priv->flip_wq, &state->base.commit_work);
11268 if (state->modeset)
11269 flush_workqueue(dev_priv->modeset_wq);
11270 intel_atomic_commit_tail(state);
11276 struct wait_rps_boost {
11277 struct wait_queue_entry wait;
11279 struct drm_crtc *crtc;
11280 struct i915_request *request;
11283 static int do_rps_boost(struct wait_queue_entry *_wait,
11284 unsigned mode, int sync, void *key)
11286 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
11287 struct i915_request *rq = wait->request;
11290 * If we missed the vblank, but the request is already running it
11291 * is reasonable to assume that it will complete before the next
11292 * vblank without our intervention, so leave RPS alone.
11294 if (!i915_request_started(rq))
11295 intel_rps_boost(rq);
11296 i915_request_put(rq);
11298 drm_crtc_vblank_put(wait->crtc);
11300 list_del(&wait->wait.entry);
11305 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
11306 struct dma_fence *fence)
11308 struct wait_rps_boost *wait;
11310 if (!dma_fence_is_i915(fence))
11313 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
11316 if (drm_crtc_vblank_get(crtc))
11319 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
11321 drm_crtc_vblank_put(crtc);
11325 wait->request = to_request(dma_fence_get(fence));
11328 wait->wait.func = do_rps_boost;
11329 wait->wait.flags = 0;
11331 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
11334 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
11336 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11337 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11338 struct drm_framebuffer *fb = plane_state->hw.fb;
11339 struct i915_vma *vma;
11341 if (plane->id == PLANE_CURSOR &&
11342 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
11343 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11344 const int align = intel_cursor_alignment(dev_priv);
11347 err = i915_gem_object_attach_phys(obj, align);
11352 vma = intel_pin_and_fence_fb_obj(fb,
11353 &plane_state->view,
11354 intel_plane_uses_fence(plane_state),
11355 &plane_state->flags);
11357 return PTR_ERR(vma);
11359 plane_state->vma = vma;
11364 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
11366 struct i915_vma *vma;
11368 vma = fetch_and_zero(&old_plane_state->vma);
11370 intel_unpin_fb_vma(vma, old_plane_state->flags);
11374 * intel_prepare_plane_fb - Prepare fb for usage on plane
11375 * @_plane: drm plane to prepare for
11376 * @_new_plane_state: the plane state being prepared
11378 * Prepares a framebuffer for usage on a display plane. Generally this
11379 * involves pinning the underlying object and updating the frontbuffer tracking
11380 * bits. Some older platforms need special physical address handling for
11383 * Returns 0 on success, negative error code on failure.
11386 intel_prepare_plane_fb(struct drm_plane *_plane,
11387 struct drm_plane_state *_new_plane_state)
11389 struct i915_sched_attr attr = {
11390 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
11392 struct intel_plane *plane = to_intel_plane(_plane);
11393 struct intel_plane_state *new_plane_state =
11394 to_intel_plane_state(_new_plane_state);
11395 struct intel_atomic_state *state =
11396 to_intel_atomic_state(new_plane_state->uapi.state);
11397 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11398 const struct intel_plane_state *old_plane_state =
11399 intel_atomic_get_old_plane_state(state, plane);
11400 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
11401 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
11405 const struct intel_crtc_state *crtc_state =
11406 intel_atomic_get_new_crtc_state(state,
11407 to_intel_crtc(old_plane_state->hw.crtc));
11409 /* Big Hammer, we also need to ensure that any pending
11410 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
11411 * current scanout is retired before unpinning the old
11412 * framebuffer. Note that we rely on userspace rendering
11413 * into the buffer attached to the pipe they are waiting
11414 * on. If not, userspace generates a GPU hang with IPEHR
11415 * point to the MI_WAIT_FOR_EVENT.
11417 * This should only fail upon a hung GPU, in which case we
11418 * can safely continue.
11420 if (intel_crtc_needs_modeset(crtc_state)) {
11421 ret = i915_sw_fence_await_reservation(&state->commit_ready,
11422 old_obj->base.resv, NULL,
11430 if (new_plane_state->uapi.fence) { /* explicit fencing */
11431 i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
11433 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11434 new_plane_state->uapi.fence,
11435 i915_fence_timeout(dev_priv),
11444 ret = i915_gem_object_pin_pages(obj);
11448 ret = intel_plane_pin_fb(new_plane_state);
11450 i915_gem_object_unpin_pages(obj);
11454 i915_gem_object_wait_priority(obj, 0, &attr);
11455 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11457 if (!new_plane_state->uapi.fence) { /* implicit fencing */
11458 struct dma_fence *fence;
11460 ret = i915_sw_fence_await_reservation(&state->commit_ready,
11461 obj->base.resv, NULL,
11463 i915_fence_timeout(dev_priv),
11468 fence = dma_resv_get_excl_rcu(obj->base.resv);
11470 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11472 dma_fence_put(fence);
11475 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11476 new_plane_state->uapi.fence);
11480 * We declare pageflips to be interactive and so merit a small bias
11481 * towards upclocking to deliver the frame on time. By only changing
11482 * the RPS thresholds to sample more regularly and aim for higher
11483 * clocks we can hopefully deliver low power workloads (like kodi)
11484 * that are not quite steady state without resorting to forcing
11485 * maximum clocks following a vblank miss (see do_rps_boost()).
11487 if (!state->rps_interactive) {
11488 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11489 state->rps_interactive = true;
11495 intel_plane_unpin_fb(new_plane_state);
11501 * intel_cleanup_plane_fb - Cleans up an fb after plane use
11502 * @plane: drm plane to clean up for
11503 * @_old_plane_state: the state from the previous modeset
11505 * Cleans up a framebuffer that has just been removed from a plane.
11508 intel_cleanup_plane_fb(struct drm_plane *plane,
11509 struct drm_plane_state *_old_plane_state)
11511 struct intel_plane_state *old_plane_state =
11512 to_intel_plane_state(_old_plane_state);
11513 struct intel_atomic_state *state =
11514 to_intel_atomic_state(old_plane_state->uapi.state);
11515 struct drm_i915_private *dev_priv = to_i915(plane->dev);
11516 struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11521 if (state->rps_interactive) {
11522 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11523 state->rps_interactive = false;
11526 /* Should only be called after a successful intel_prepare_plane_fb()! */
11527 intel_plane_unpin_fb(old_plane_state);
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
11543 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11545 struct intel_plane *plane;
11547 for_each_intel_plane(&dev_priv->drm, plane) {
11548 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11551 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11556 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11557 struct drm_file *file)
11559 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11560 struct drm_crtc *drmmode_crtc;
11561 struct intel_crtc *crtc;
11563 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11567 crtc = to_intel_crtc(drmmode_crtc);
11568 pipe_from_crtc_id->pipe = crtc->pipe;
11573 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11575 struct drm_device *dev = encoder->base.dev;
11576 struct intel_encoder *source_encoder;
11577 u32 possible_clones = 0;
11579 for_each_intel_encoder(dev, source_encoder) {
11580 if (encoders_cloneable(encoder, source_encoder))
11581 possible_clones |= drm_encoder_mask(&source_encoder->base);
11584 return possible_clones;
11587 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11589 struct drm_device *dev = encoder->base.dev;
11590 struct intel_crtc *crtc;
11591 u32 possible_crtcs = 0;
11593 for_each_intel_crtc(dev, crtc) {
11594 if (encoder->pipe_mask & BIT(crtc->pipe))
11595 possible_crtcs |= drm_crtc_mask(&crtc->base);
11598 return possible_crtcs;
11601 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11603 if (!IS_MOBILE(dev_priv))
11606 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11609 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11615 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11617 if (INTEL_GEN(dev_priv) >= 9)
11620 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11623 if (HAS_PCH_LPT_H(dev_priv) &&
11624 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11627 /* DDI E can't be used if DDI A requires 4 lanes */
11628 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11631 if (!dev_priv->vbt.int_crt_support)
11637 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
11639 struct intel_encoder *encoder;
11640 bool dpd_is_edp = false;
11642 intel_pps_unlock_regs_wa(dev_priv);
11644 if (!HAS_DISPLAY(dev_priv))
11647 if (IS_ALDERLAKE_S(dev_priv)) {
11648 intel_ddi_init(dev_priv, PORT_A);
11649 intel_ddi_init(dev_priv, PORT_TC1);
11650 intel_ddi_init(dev_priv, PORT_TC2);
11651 intel_ddi_init(dev_priv, PORT_TC3);
11652 intel_ddi_init(dev_priv, PORT_TC4);
11653 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11654 intel_ddi_init(dev_priv, PORT_A);
11655 intel_ddi_init(dev_priv, PORT_B);
11656 intel_ddi_init(dev_priv, PORT_TC1);
11657 intel_ddi_init(dev_priv, PORT_TC2);
11658 } else if (INTEL_GEN(dev_priv) >= 12) {
11659 intel_ddi_init(dev_priv, PORT_A);
11660 intel_ddi_init(dev_priv, PORT_B);
11661 intel_ddi_init(dev_priv, PORT_TC1);
11662 intel_ddi_init(dev_priv, PORT_TC2);
11663 intel_ddi_init(dev_priv, PORT_TC3);
11664 intel_ddi_init(dev_priv, PORT_TC4);
11665 intel_ddi_init(dev_priv, PORT_TC5);
11666 intel_ddi_init(dev_priv, PORT_TC6);
11667 icl_dsi_init(dev_priv);
11668 } else if (IS_JSL_EHL(dev_priv)) {
11669 intel_ddi_init(dev_priv, PORT_A);
11670 intel_ddi_init(dev_priv, PORT_B);
11671 intel_ddi_init(dev_priv, PORT_C);
11672 intel_ddi_init(dev_priv, PORT_D);
11673 icl_dsi_init(dev_priv);
11674 } else if (IS_GEN(dev_priv, 11)) {
11675 intel_ddi_init(dev_priv, PORT_A);
11676 intel_ddi_init(dev_priv, PORT_B);
11677 intel_ddi_init(dev_priv, PORT_C);
11678 intel_ddi_init(dev_priv, PORT_D);
11679 intel_ddi_init(dev_priv, PORT_E);
11681 * On some ICL SKUs port F is not present. No strap bits for
11682 * this, so rely on VBT.
11683 * Work around broken VBTs on SKUs known to have no port F.
11685 if (IS_ICL_WITH_PORT_F(dev_priv) &&
11686 intel_bios_is_port_present(dev_priv, PORT_F))
11687 intel_ddi_init(dev_priv, PORT_F);
11689 icl_dsi_init(dev_priv);
11690 } else if (IS_GEN9_LP(dev_priv)) {
11692 * FIXME: Broxton doesn't support port detection via the
11693 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
11694 * detect the ports.
11696 intel_ddi_init(dev_priv, PORT_A);
11697 intel_ddi_init(dev_priv, PORT_B);
11698 intel_ddi_init(dev_priv, PORT_C);
11700 vlv_dsi_init(dev_priv);
11701 } else if (HAS_DDI(dev_priv)) {
11704 if (intel_ddi_crt_present(dev_priv))
11705 intel_crt_init(dev_priv);
11708 * Haswell uses DDI functions to detect digital outputs.
11709 * On SKL pre-D0 the strap isn't connected. Later SKUs may or
11710 * may not have it - it was supposed to be fixed by the same
11711 * time we stopped using straps. Assume it's there.
11713 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11714 /* WaIgnoreDDIAStrap: skl */
11715 if (found || IS_GEN9_BC(dev_priv))
11716 intel_ddi_init(dev_priv, PORT_A);
11718 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
11720 if (HAS_PCH_TGP(dev_priv)) {
11721 /* W/A due to lack of STRAP config on TGP PCH*/
11722 found = (SFUSE_STRAP_DDIB_DETECTED |
11723 SFUSE_STRAP_DDIC_DETECTED |
11724 SFUSE_STRAP_DDID_DETECTED);
11726 found = intel_de_read(dev_priv, SFUSE_STRAP);
11729 if (found & SFUSE_STRAP_DDIB_DETECTED)
11730 intel_ddi_init(dev_priv, PORT_B);
11731 if (found & SFUSE_STRAP_DDIC_DETECTED)
11732 intel_ddi_init(dev_priv, PORT_C);
11733 if (found & SFUSE_STRAP_DDID_DETECTED)
11734 intel_ddi_init(dev_priv, PORT_D);
11735 if (found & SFUSE_STRAP_DDIF_DETECTED)
11736 intel_ddi_init(dev_priv, PORT_F);
11738 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
11740 if (IS_GEN9_BC(dev_priv) &&
11741 intel_bios_is_port_present(dev_priv, PORT_E))
11742 intel_ddi_init(dev_priv, PORT_E);
11744 } else if (HAS_PCH_SPLIT(dev_priv)) {
11748 * intel_edp_init_connector() depends on this completing first,
11749 * to prevent the registration of both eDP and LVDS and the
11750 * incorrect sharing of the PPS.
11752 intel_lvds_init(dev_priv);
11753 intel_crt_init(dev_priv);
11755 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11757 if (ilk_has_edp_a(dev_priv))
11758 g4x_dp_init(dev_priv, DP_A, PORT_A);
11760 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11761 /* PCH SDVOB multiplex with HDMIB */
11762 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11764 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11765 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11766 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11769 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11770 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11772 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11773 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11775 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11776 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11778 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11779 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11780 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11781 bool has_edp, has_port;
11783 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11784 intel_crt_init(dev_priv);
11787 * The DP_DETECTED bit is the latched state of the DDC
11788 * SDA pin at boot. However since eDP doesn't require DDC
11789 * (no way to plug in a DP->HDMI dongle) the DDC pins for
11790 * eDP ports may have been muxed to an alternate function.
11791 * Thus we can't rely on the DP_DETECTED bit alone to detect
11792 * eDP ports. Consult the VBT as well as DP_DETECTED to
11793 * detect eDP ports.
11795 * Sadly the straps seem to be missing sometimes even for HDMI
11796 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
11797 * and VBT for the presence of the port. Additionally we can't
11798 * trust the port type the VBT declares as we've seen at least
11799 * HDMI ports that the VBT claim are DP or eDP.
11801 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11802 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11803 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11804 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11805 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11806 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11808 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11809 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11810 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11811 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11812 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11813 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11815 if (IS_CHERRYVIEW(dev_priv)) {
11817 * eDP not supported on port D,
11818 * so no need to worry about it
11820 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11821 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11822 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11823 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11824 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11827 vlv_dsi_init(dev_priv);
11828 } else if (IS_PINEVIEW(dev_priv)) {
11829 intel_lvds_init(dev_priv);
11830 intel_crt_init(dev_priv);
11831 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
11832 bool found = false;
11834 if (IS_MOBILE(dev_priv))
11835 intel_lvds_init(dev_priv);
11837 intel_crt_init(dev_priv);
11839 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11840 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11841 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11842 if (!found && IS_G4X(dev_priv)) {
11843 drm_dbg_kms(&dev_priv->drm,
11844 "probing HDMI on SDVOB\n");
11845 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11848 if (!found && IS_G4X(dev_priv))
11849 g4x_dp_init(dev_priv, DP_B, PORT_B);
11852 /* Before G4X SDVOC doesn't have its own detect register */
11854 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11855 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11856 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11859 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11861 if (IS_G4X(dev_priv)) {
11862 drm_dbg_kms(&dev_priv->drm,
11863 "probing HDMI on SDVOC\n");
11864 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11866 if (IS_G4X(dev_priv))
11867 g4x_dp_init(dev_priv, DP_C, PORT_C);
11870 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11871 g4x_dp_init(dev_priv, DP_D, PORT_D);
11873 if (SUPPORTS_TV(dev_priv))
11874 intel_tv_init(dev_priv);
11875 } else if (IS_GEN(dev_priv, 2)) {
11876 if (IS_I85X(dev_priv))
11877 intel_lvds_init(dev_priv);
11879 intel_crt_init(dev_priv);
11880 intel_dvo_init(dev_priv);
11883 for_each_intel_encoder(&dev_priv->drm, encoder) {
11884 encoder->base.possible_crtcs =
11885 intel_encoder_possible_crtcs(encoder);
11886 encoder->base.possible_clones =
11887 intel_encoder_possible_clones(encoder);
11890 intel_init_pch_refclk(dev_priv);
11892 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11895 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11897 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11899 drm_framebuffer_cleanup(fb);
11900 intel_frontbuffer_put(intel_fb->frontbuffer);
11905 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11906 struct drm_file *file,
11907 unsigned int *handle)
11909 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11910 struct drm_i915_private *i915 = to_i915(obj->base.dev);
11912 if (obj->userptr.mm) {
11913 drm_dbg(&i915->drm,
11914 "attempting to use a userptr for a framebuffer, denied\n");
11918 return drm_gem_handle_create(file, &obj->base, handle);
11921 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11922 struct drm_file *file,
11923 unsigned flags, unsigned color,
11924 struct drm_clip_rect *clips,
11925 unsigned num_clips)
11927 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11929 i915_gem_object_flush_if_display(obj);
11930 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11935 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11936 .destroy = intel_user_framebuffer_destroy,
11937 .create_handle = intel_user_framebuffer_create_handle,
11938 .dirty = intel_user_framebuffer_dirty,
11941 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
11942 struct drm_i915_gem_object *obj,
11943 struct drm_mode_fb_cmd2 *mode_cmd)
11945 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
11946 struct drm_framebuffer *fb = &intel_fb->base;
11948 unsigned int tiling, stride;
11952 intel_fb->frontbuffer = intel_frontbuffer_get(obj);
11953 if (!intel_fb->frontbuffer)
11956 i915_gem_object_lock(obj, NULL);
11957 tiling = i915_gem_object_get_tiling(obj);
11958 stride = i915_gem_object_get_stride(obj);
11959 i915_gem_object_unlock(obj);
11961 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
11963 * If there's a fence, enforce that
11964 * the fb modifier and tiling mode match.
11966 if (tiling != I915_TILING_NONE &&
11967 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11968 drm_dbg_kms(&dev_priv->drm,
11969 "tiling_mode doesn't match fb modifier\n");
11973 if (tiling == I915_TILING_X) {
11974 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
11975 } else if (tiling == I915_TILING_Y) {
11976 drm_dbg_kms(&dev_priv->drm,
11977 "No Y tiling for legacy addfb\n");
11982 if (!drm_any_plane_has_format(&dev_priv->drm,
11983 mode_cmd->pixel_format,
11984 mode_cmd->modifier[0])) {
11985 struct drm_format_name_buf format_name;
11987 drm_dbg_kms(&dev_priv->drm,
11988 "unsupported pixel format %s / modifier 0x%llx\n",
11989 drm_get_format_name(mode_cmd->pixel_format,
11991 mode_cmd->modifier[0]);
11996 * gen2/3 display engine uses the fence if present,
11997 * so the tiling mode must match the fb modifier exactly.
11999 if (INTEL_GEN(dev_priv) < 4 &&
12000 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
12001 drm_dbg_kms(&dev_priv->drm,
12002 "tiling_mode must match fb modifier exactly on gen2/3\n");
12006 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
12007 mode_cmd->modifier[0]);
12008 if (mode_cmd->pitches[0] > max_stride) {
12009 drm_dbg_kms(&dev_priv->drm,
12010 "%s pitch (%u) must be at most %d\n",
12011 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
12012 "tiled" : "linear",
12013 mode_cmd->pitches[0], max_stride);
12018 * If there's a fence, enforce that
12019 * the fb pitch and fence stride match.
12021 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
12022 drm_dbg_kms(&dev_priv->drm,
12023 "pitch (%d) must match tiling stride (%d)\n",
12024 mode_cmd->pitches[0], stride);
12028 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
12029 if (mode_cmd->offsets[0] != 0) {
12030 drm_dbg_kms(&dev_priv->drm,
12031 "plane 0 offset (0x%08x) must be 0\n",
12032 mode_cmd->offsets[0]);
12036 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
12038 for (i = 0; i < fb->format->num_planes; i++) {
12039 u32 stride_alignment;
12041 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
12042 drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
12047 stride_alignment = intel_fb_stride_alignment(fb, i);
12048 if (fb->pitches[i] & (stride_alignment - 1)) {
12049 drm_dbg_kms(&dev_priv->drm,
12050 "plane %d pitch (%d) must be at least %u byte aligned\n",
12051 i, fb->pitches[i], stride_alignment);
12055 if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
12056 int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
12058 if (fb->pitches[i] != ccs_aux_stride) {
12059 drm_dbg_kms(&dev_priv->drm,
12060 "ccs aux plane %d pitch (%d) must be %d\n",
12062 fb->pitches[i], ccs_aux_stride);
12067 fb->obj[i] = &obj->base;
12070 ret = intel_fill_fb_info(dev_priv, fb);
12074 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
12076 drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
12083 intel_frontbuffer_put(intel_fb->frontbuffer);
12087 static struct drm_framebuffer *
12088 intel_user_framebuffer_create(struct drm_device *dev,
12089 struct drm_file *filp,
12090 const struct drm_mode_fb_cmd2 *user_mode_cmd)
12092 struct drm_framebuffer *fb;
12093 struct drm_i915_gem_object *obj;
12094 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
12096 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
12098 return ERR_PTR(-ENOENT);
12100 fb = intel_framebuffer_create(obj, &mode_cmd);
12101 i915_gem_object_put(obj);
12106 static enum drm_mode_status
12107 intel_mode_valid(struct drm_device *dev,
12108 const struct drm_display_mode *mode)
12110 struct drm_i915_private *dev_priv = to_i915(dev);
12111 int hdisplay_max, htotal_max;
12112 int vdisplay_max, vtotal_max;
12115 * Can't reject DBLSCAN here because Xorg ddxen can add piles
12116 * of DBLSCAN modes to the output's mode list when they detect
12117 * the scaling mode property on the connector. And they don't
12118 * ask the kernel to validate those modes in any way until
12119 * modeset time at which point the client gets a protocol error.
12120 * So in order to not upset those clients we silently ignore the
12121 * DBLSCAN flag on such connectors. For other connectors we will
12122 * reject modes with the DBLSCAN flag in encoder->compute_config().
12123 * And we always reject DBLSCAN modes in connector->mode_valid()
12124 * as we never want such modes on the connector's mode list.
12127 if (mode->vscan > 1)
12128 return MODE_NO_VSCAN;
12130 if (mode->flags & DRM_MODE_FLAG_HSKEW)
12131 return MODE_H_ILLEGAL;
12133 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
12134 DRM_MODE_FLAG_NCSYNC |
12135 DRM_MODE_FLAG_PCSYNC))
12138 if (mode->flags & (DRM_MODE_FLAG_BCAST |
12139 DRM_MODE_FLAG_PIXMUX |
12140 DRM_MODE_FLAG_CLKDIV2))
12143 /* Transcoder timing limits */
12144 if (INTEL_GEN(dev_priv) >= 11) {
12145 hdisplay_max = 16384;
12146 vdisplay_max = 8192;
12147 htotal_max = 16384;
12149 } else if (INTEL_GEN(dev_priv) >= 9 ||
12150 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12151 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
12152 vdisplay_max = 4096;
12155 } else if (INTEL_GEN(dev_priv) >= 3) {
12156 hdisplay_max = 4096;
12157 vdisplay_max = 4096;
12161 hdisplay_max = 2048;
12162 vdisplay_max = 2048;
12167 if (mode->hdisplay > hdisplay_max ||
12168 mode->hsync_start > htotal_max ||
12169 mode->hsync_end > htotal_max ||
12170 mode->htotal > htotal_max)
12171 return MODE_H_ILLEGAL;
12173 if (mode->vdisplay > vdisplay_max ||
12174 mode->vsync_start > vtotal_max ||
12175 mode->vsync_end > vtotal_max ||
12176 mode->vtotal > vtotal_max)
12177 return MODE_V_ILLEGAL;
12179 if (INTEL_GEN(dev_priv) >= 5) {
12180 if (mode->hdisplay < 64 ||
12181 mode->htotal - mode->hdisplay < 32)
12182 return MODE_H_ILLEGAL;
12184 if (mode->vtotal - mode->vdisplay < 5)
12185 return MODE_V_ILLEGAL;
12187 if (mode->htotal - mode->hdisplay < 32)
12188 return MODE_H_ILLEGAL;
12190 if (mode->vtotal - mode->vdisplay < 3)
12191 return MODE_V_ILLEGAL;
12197 enum drm_mode_status
12198 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
12199 const struct drm_display_mode *mode,
12202 int plane_width_max, plane_height_max;
12205 * intel_mode_valid() should be
12206 * sufficient on older platforms.
12208 if (INTEL_GEN(dev_priv) < 9)
12212 * Most people will probably want a fullscreen
12213 * plane so let's not advertize modes that are
12214 * too big for that.
12216 if (INTEL_GEN(dev_priv) >= 11) {
12217 plane_width_max = 5120 << bigjoiner;
12218 plane_height_max = 4320;
12220 plane_width_max = 5120;
12221 plane_height_max = 4096;
12224 if (mode->hdisplay > plane_width_max)
12225 return MODE_H_ILLEGAL;
12227 if (mode->vdisplay > plane_height_max)
12228 return MODE_V_ILLEGAL;
12233 static const struct drm_mode_config_funcs intel_mode_funcs = {
12234 .fb_create = intel_user_framebuffer_create,
12235 .get_format_info = intel_get_format_info,
12236 .output_poll_changed = intel_fbdev_output_poll_changed,
12237 .mode_valid = intel_mode_valid,
12238 .atomic_check = intel_atomic_check,
12239 .atomic_commit = intel_atomic_commit,
12240 .atomic_state_alloc = intel_atomic_state_alloc,
12241 .atomic_state_clear = intel_atomic_state_clear,
12242 .atomic_state_free = intel_atomic_state_free,
/*
12246 * intel_init_display_hooks - initialize the display modesetting hooks
12247 * @dev_priv: device private
 *
 * Selects the per-platform crtc enable/disable, pipe-config readout,
 * modeset-commit and initial-plane-config vfuncs.
 */
12249 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
12251 intel_init_cdclk_hooks(dev_priv);
12252 intel_init_audio_hooks(dev_priv);
12254 intel_dpll_init_clock_hook(dev_priv);
/*
 * NOTE: the gen9+ and HAS_DDI branches install identical hooks on
 * purpose; they are kept separate so the platform split stays explicit.
 */
12256 if (INTEL_GEN(dev_priv) >= 9) {
12257 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12258 dev_priv->display.crtc_enable = hsw_crtc_enable;
12259 dev_priv->display.crtc_disable = hsw_crtc_disable;
12260 } else if (HAS_DDI(dev_priv)) {
12261 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12262 dev_priv->display.crtc_enable = hsw_crtc_enable;
12263 dev_priv->display.crtc_disable = hsw_crtc_disable;
12264 } else if (HAS_PCH_SPLIT(dev_priv)) {
12265 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
12266 dev_priv->display.crtc_enable = ilk_crtc_enable;
12267 dev_priv->display.crtc_disable = ilk_crtc_disable;
12268 } else if (IS_CHERRYVIEW(dev_priv) ||
12269 IS_VALLEYVIEW(dev_priv)) {
12270 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12271 dev_priv->display.crtc_enable = valleyview_crtc_enable;
12272 dev_priv->display.crtc_disable = i9xx_crtc_disable;
/* Fallback for all remaining (oldest) platforms. */
12274 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12275 dev_priv->display.crtc_enable = i9xx_crtc_enable;
12276 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12279 intel_fdi_init_hook(dev_priv);
12281 if (INTEL_GEN(dev_priv) >= 9) {
12282 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
12283 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
12285 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
12286 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
12291 void intel_modeset_init_hw(struct drm_i915_private *i915)
12293 struct intel_cdclk_state *cdclk_state =
12294 to_intel_cdclk_state(i915->cdclk.obj.state);
12296 intel_update_cdclk(i915);
12297 intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
12298 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
/*
 * sanitize_watermarks_add_affected - pull all crtcs and planes into @state
 * @state: atomic state being built for the watermark sanitization commit
 *
 * Adds every crtc state (marking active ones as inherited so they do not
 * trigger a full modeset) and every plane state to the atomic state.
 * Returns 0 on success or a negative error code from state acquisition.
 */
12301 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
12303 struct drm_plane *plane;
12304 struct intel_crtc *crtc;
12306 for_each_intel_crtc(state->dev, crtc) {
12307 struct intel_crtc_state *crtc_state;
12309 crtc_state = intel_atomic_get_crtc_state(state, crtc);
12310 if (IS_ERR(crtc_state))
12311 return PTR_ERR(crtc_state);
12313 if (crtc_state->hw.active) {
12315 * Preserve the inherited flag to avoid
12316 * taking the full modeset path.
12318 crtc_state->inherited = true;
12322 drm_for_each_plane(plane, state->dev) {
12323 struct drm_plane_state *plane_state;
12325 plane_state = drm_atomic_get_plane_state(state, plane);
12326 if (IS_ERR(plane_state))
12327 return PTR_ERR(plane_state);
/*
12334 * Calculate what we think the watermarks should be for the state we've read
12335 * out of the hardware and then immediately program those watermarks so that
12336 * we ensure the hardware settings match our internal state.
12338 * We can calculate what we think WM's should be by creating a duplicate of the
12339 * current state (which was constructed during hardware readout) and running it
12340 * through the atomic check code to calculate new watermark values in the
 */
12343 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
12345 struct drm_atomic_state *state;
12346 struct intel_atomic_state *intel_state;
12347 struct intel_crtc *crtc;
12348 struct intel_crtc_state *crtc_state;
12349 struct drm_modeset_acquire_ctx ctx;
12353 /* Only supported on platforms that use atomic watermark design */
12354 if (!dev_priv->display.optimize_watermarks)
12357 state = drm_atomic_state_alloc(&dev_priv->drm);
12358 if (drm_WARN_ON(&dev_priv->drm, !state))
12361 intel_state = to_intel_atomic_state(state);
12363 drm_modeset_acquire_init(&ctx, 0);
12366 state->acquire_ctx = &ctx;
12369 * Hardware readout is the only time we don't want to calculate
12370 * intermediate watermarks (since we don't trust the current
 */
12373 if (!HAS_GMCH(dev_priv))
12374 intel_state->skip_intermediate_wm = true;
12376 ret = sanitize_watermarks_add_affected(state);
12380 ret = intel_atomic_check(&dev_priv->drm, state);
12384 /* Write calculated watermark values back */
12385 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
12386 crtc_state->wm.need_postvbl_update = true;
12387 dev_priv->display.optimize_watermarks(intel_state, crtc);
/* Mirror the computed WM into the crtc's committed state. */
12389 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
/*
 * -EDEADLK: drop and retry lock acquisition.
 * NOTE(review): the retry goto/label lines are not visible in this
 * chunk - presumably this loops back to re-run the check; confirm.
 */
12393 if (ret == -EDEADLK) {
12394 drm_atomic_state_clear(state);
12395 drm_modeset_backoff(&ctx);
12400 * If we fail here, it means that the hardware appears to be
12401 * programmed in a way that shouldn't be possible, given our
12402 * understanding of watermark requirements. This might mean a
12403 * mistake in the hardware readout code or a mistake in the
12404 * watermark calculations for a given platform. Raise a WARN
12405 * so that this is noticeable.
12407 * If this actually happens, we'll have to just leave the
12408 * BIOS-programmed watermarks untouched and hope for the best.
 */
12410 drm_WARN(&dev_priv->drm, ret,
12411 "Could not determine valid watermarks for inherited state\n");
12413 drm_atomic_state_put(state);
12415 drm_modeset_drop_locks(&ctx);
12416 drm_modeset_acquire_fini(&ctx);
/*
 * intel_update_fdi_pll_freq - cache the FDI PLL frequency for later use
 * @dev_priv: device private
 *
 * On Ironlake the frequency is derived from the FDI_PLL_BIOS_0 feedback
 * divider; SNB/IVB use a fixed 270 MHz (270000 kHz) FDI PLL.
 * NOTE(review): the declaration line for the variable holding the
 * register readout is not visible in this chunk.
 */
12419 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12421 if (IS_IRONLAKE(dev_priv)) {
12423 intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12425 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12426 } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
12427 dev_priv->fdi_pll_freq = 270000;
12432 drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
/*
 * intel_initial_commit - commit the state read out from hardware
 * @dev: drm device
 *
 * Builds an atomic state containing every crtc (plus affected planes, and
 * connectors where an encoder's initial fastset check fails) and commits
 * it, so all derived plane/crtc state is recomputed once at init.
 * Returns 0 on success or a negative error code.
 */
12435 static int intel_initial_commit(struct drm_device *dev)
12437 struct drm_atomic_state *state = NULL;
12438 struct drm_modeset_acquire_ctx ctx;
12439 struct intel_crtc *crtc;
12442 state = drm_atomic_state_alloc(dev);
12446 drm_modeset_acquire_init(&ctx, 0);
12449 state->acquire_ctx = &ctx;
12451 for_each_intel_crtc(dev, crtc) {
12452 struct intel_crtc_state *crtc_state =
12453 intel_atomic_get_crtc_state(state, crtc);
12455 if (IS_ERR(crtc_state)) {
12456 ret = PTR_ERR(crtc_state);
12460 if (crtc_state->hw.active) {
12461 struct intel_encoder *encoder;
12464 * We've not yet detected sink capabilities
12465 * (audio,infoframes,etc.) and thus we don't want to
12466 * force a full state recomputation yet. We want that to
12467 * happen only for the first real commit from userspace.
12468 * So preserve the inherited flag for the time being.
 */
12470 crtc_state->inherited = true;
12472 ret = drm_atomic_add_affected_planes(state, &crtc->base);
12477 * FIXME hack to force a LUT update to avoid the
12478 * plane update forcing the pipe gamma on without
12479 * having a proper LUT loaded. Remove once we
12480 * have readout for pipe gamma enable.
 */
12482 crtc_state->uapi.color_mgmt_changed = true;
/* Pull in connectors for encoders whose fastset check fails. */
12484 for_each_intel_encoder_mask(dev, encoder,
12485 crtc_state->uapi.encoder_mask) {
12486 if (encoder->initial_fastset_check &&
12487 !encoder->initial_fastset_check(encoder, crtc_state)) {
12488 ret = drm_atomic_add_affected_connectors(state,
12497 ret = drm_atomic_commit(state);
/*
 * -EDEADLK: back off and retry.
 * NOTE(review): the retry goto/label lines are not visible here.
 */
12500 if (ret == -EDEADLK) {
12501 drm_atomic_state_clear(state);
12502 drm_modeset_backoff(&ctx);
12506 drm_atomic_state_put(state);
12508 drm_modeset_drop_locks(&ctx);
12509 drm_modeset_acquire_fini(&ctx);
/*
 * intel_mode_config_init - initialize drm_mode_config for this device
 * @i915: device private
 *
 * Sets min/max framebuffer dimensions, cursor limits, async flip support
 * and installs intel_mode_funcs as the mode_config vfuncs.
 */
12514 static void intel_mode_config_init(struct drm_i915_private *i915)
12516 struct drm_mode_config *mode_config = &i915->drm.mode_config;
12518 drm_mode_config_init(&i915->drm);
12519 INIT_LIST_HEAD(&i915->global_obj_list);
12521 mode_config->min_width = 0;
12522 mode_config->min_height = 0;
12524 mode_config->preferred_depth = 24;
12525 mode_config->prefer_shadow = 1;
12527 mode_config->allow_fb_modifiers = true;
12529 mode_config->funcs = &intel_mode_funcs;
12531 mode_config->async_page_flip = has_async_flips(i915);
12534 * Maximum framebuffer dimensions, chosen to match
12535 * the maximum render engine surface size on gen4+.
 */
12537 if (INTEL_GEN(i915) >= 7) {
12538 mode_config->max_width = 16384;
12539 mode_config->max_height = 16384;
12540 } else if (INTEL_GEN(i915) >= 4) {
12541 mode_config->max_width = 8192;
12542 mode_config->max_height = 8192;
12543 } else if (IS_GEN(i915, 3)) {
12544 mode_config->max_width = 4096;
12545 mode_config->max_height = 4096;
12547 mode_config->max_width = 2048;
12548 mode_config->max_height = 2048;
/* Cursor size limits are highly platform specific on old hardware. */
12551 if (IS_I845G(i915) || IS_I865G(i915)) {
12552 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12553 mode_config->cursor_height = 1023;
12554 } else if (IS_I830(i915) || IS_I85X(i915) ||
12555 IS_I915G(i915) || IS_I915GM(i915)) {
12556 mode_config->cursor_width = 64;
12557 mode_config->cursor_height = 64;
12559 mode_config->cursor_width = 256;
12560 mode_config->cursor_height = 256;
12564 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
12566 intel_atomic_global_obj_cleanup(i915);
12567 drm_mode_config_cleanup(&i915->drm);
/*
 * plane_config_fini - release references held by an initial plane config
 * @plane_config: initial plane config populated during BIOS fb takeover
 *
 * Drops the framebuffer reference (if a real fb, not just the stub, was
 * created) and the vma reference, if any.
 */
12570 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12572 if (plane_config->fb) {
12573 struct drm_framebuffer *fb = &plane_config->fb->base;
12575 /* We may only have the stub and not a full framebuffer */
12576 if (drm_framebuffer_read_refcount(fb))
12577 drm_framebuffer_put(fb);
12582 if (plane_config->vma)
12583 i915_vma_put(plane_config->vma);
12586 /* part #1: call before irq install */
/*
 * intel_modeset_init_noirq - modeset init that must run before IRQs
 * @i915: device private
 *
 * Sets up vblanks, VBT/VGA/CSR, workqueues, mode config and the cdclk,
 * dbuf and bandwidth global state objects. On error, unwinds everything
 * initialized so far. Returns 0 on success or a negative error code.
 */
12587 int intel_modeset_init_noirq(struct drm_i915_private *i915)
12591 if (i915_inject_probe_failure(i915))
12594 if (HAS_DISPLAY(i915)) {
12595 ret = drm_vblank_init(&i915->drm,
12596 INTEL_NUM_PIPES(i915));
12601 intel_bios_init(i915);
12603 ret = intel_vga_register(i915);
12607 /* FIXME: completely on the wrong abstraction layer */
12608 intel_power_domains_init_hw(i915, false);
12610 intel_csr_ucode_init(i915);
12612 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
12613 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
12614 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
/* BIOS-visible frame start delay; valid range 1-4 scanlines. */
12616 i915->framestart_delay = 1; /* 1-4 */
12618 intel_mode_config_init(i915);
12620 ret = intel_cdclk_init(i915);
12622 goto cleanup_vga_client_pw_domain_csr;
12624 ret = intel_dbuf_init(i915);
12626 goto cleanup_vga_client_pw_domain_csr;
12628 ret = intel_bw_init(i915);
12630 goto cleanup_vga_client_pw_domain_csr;
12632 init_llist_head(&i915->atomic_helper.free_list);
12633 INIT_WORK(&i915->atomic_helper.free_work,
12634 intel_atomic_helper_free_state_worker);
12636 intel_init_quirks(i915);
12638 intel_fbc_init(i915);
/* Error unwind: release CSR, power domains, VGA and VBT in turn. */
12642 cleanup_vga_client_pw_domain_csr:
12643 intel_csr_ucode_fini(i915);
12644 intel_power_domains_driver_remove(i915);
12645 intel_vga_unregister(i915);
12647 intel_bios_driver_remove(i915);
12652 /* part #2: call after irq install, but before gem init */
/*
 * intel_modeset_init_nogem - modeset init between IRQ and GEM init
 * @i915: device private
 *
 * Initializes PM/watermarks, GMBUS/PPS, creates crtcs, reads out the
 * BIOS hardware state, takes over the BIOS framebuffer, and sanitizes
 * watermarks. Returns 0 on success or a negative error code.
 */
12653 int intel_modeset_init_nogem(struct drm_i915_private *i915)
12655 struct drm_device *dev = &i915->drm;
12657 struct intel_crtc *crtc;
12660 intel_init_pm(i915);
12662 intel_panel_sanitize_ssc(i915);
12664 intel_pps_setup(i915);
12666 intel_gmbus_setup(i915);
12668 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
12669 INTEL_NUM_PIPES(i915),
12670 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
12672 if (HAS_DISPLAY(i915)) {
12673 for_each_pipe(i915, pipe) {
12674 ret = intel_crtc_init(i915, pipe);
/* Crtc creation failure tears down all mode config state. */
12676 intel_mode_config_cleanup(i915);
12682 intel_plane_possible_crtcs_init(i915);
12683 intel_shared_dpll_init(dev);
12684 intel_update_fdi_pll_freq(i915);
12686 intel_update_czclk(i915);
12687 intel_modeset_init_hw(i915);
12688 intel_dpll_update_ref_clks(i915);
12690 intel_hdcp_component_init(i915);
12692 if (i915->max_cdclk_freq == 0)
12693 intel_update_max_cdclk(i915);
12696 * If the platform has HTI, we need to find out whether it has reserved
12697 * any display resources before we create our display outputs.
 */
12699 if (INTEL_INFO(i915)->display.has_hti)
12700 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
12702 /* Just disable it once at startup */
12703 intel_vga_disable(i915);
12704 intel_setup_outputs(i915);
12706 drm_modeset_lock_all(dev);
12707 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
12708 drm_modeset_unlock_all(dev);
12710 for_each_intel_crtc(dev, crtc) {
12711 struct intel_initial_plane_config plane_config = {};
12713 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
12717 * Note that reserving the BIOS fb up front prevents us
12718 * from stuffing other stolen allocations like the ring
12719 * on top. This prevents some ugliness at boot time, and
12720 * can even allow for smooth boot transitions if the BIOS
12721 * fb is large enough for the active pipe configuration.
 */
12723 i915->display.get_initial_plane_config(crtc, &plane_config);
12726 * If the fb is shared between multiple heads, we'll
12727 * just get the first one.
 */
12729 intel_find_initial_plane_obj(crtc, &plane_config);
12731 plane_config_fini(&plane_config);
12735 * Make sure hardware watermarks really match the state we read out.
12736 * Note that we need to do this after reconstructing the BIOS fb's
12737 * since the watermark calculation done here will use pstate->fb.
 */
12739 if (!HAS_GMCH(i915))
12740 sanitize_watermarks(i915);
12745 /* part #3: call after gem init */
/*
 * intel_modeset_init - final stage of display init, after GEM is up
 * @i915: device private
 *
 * Performs the initial atomic commit, sets up overlay/fbdev, enables
 * hotplug handling and IPC. Returns 0 on success or a negative error.
 */
12746 int intel_modeset_init(struct drm_i915_private *i915)
12750 if (!HAS_DISPLAY(i915))
12754 * Force all active planes to recompute their states. So that on
12755 * mode_setcrtc after probe, all the intel_plane_state variables
12756 * are already calculated and there is no assert_plane warnings
 */
12759 ret = intel_initial_commit(&i915->drm);
/* Initial commit failure is logged but deliberately not fatal. */
12761 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
12763 intel_overlay_setup(i915);
12765 ret = intel_fbdev_init(&i915->drm);
12769 /* Only enable hotplug handling once the fbdev is fully set up. */
12770 intel_hpd_init(i915);
12771 intel_hpd_poll_disable(i915);
12773 intel_init_ipc(i915);
/*
 * i830_enable_pipe - force-enable a pipe on i830 (quirk workaround)
 * @dev_priv: device private
 * @pipe: pipe to enable
 *
 * Programs a fixed 640x480@60 mode and enables the DPLL and pipe. The
 * register write ordering below is hardware-mandated; do not reorder.
 */
12778 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12780 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12781 /* 640x480@60Hz, ~25175 kHz */
12782 struct dpll clock = {
/* Sanity-check the divider setup actually yields ~25.175 MHz. */
12792 drm_WARN_ON(&dev_priv->drm,
12793 i9xx_calc_dpll_params(48000, &clock) != 25154);
12795 drm_dbg_kms(&dev_priv->drm,
12796 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
12797 pipe_name(pipe), clock.vco, clock.dot);
12799 fp = i9xx_dpll_compute_fp(&clock);
12800 dpll = DPLL_DVO_2X_MODE |
12801 DPLL_VGA_MODE_DIS |
12802 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
12803 PLL_P2_DIVIDE_BY_4 |
12804 PLL_REF_INPUT_DREFCLK |
12807 intel_de_write(dev_priv, FP0(pipe), fp);
12808 intel_de_write(dev_priv, FP1(pipe), fp);
/* Hard-coded 640x480@60 CRT timings (registers take value - 1). */
12810 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
12811 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
12812 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
12813 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
12814 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
12815 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
12816 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
12819 * Apparently we need to have VGA mode enabled prior to changing
12820 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12821 * dividers, even though the register value does change.
 */
12823 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12824 intel_de_write(dev_priv, DPLL(pipe), dpll);
12826 /* Wait for the clocks to stabilize. */
12827 intel_de_posting_read(dev_priv, DPLL(pipe));
12830 /* The pixel multiplier can only be updated once the
12831 * DPLL is enabled and the clocks are stable.
12833 * So write it again.
 */
12835 intel_de_write(dev_priv, DPLL(pipe), dpll);
12837 /* We do this three times for luck */
12838 for (i = 0; i < 3 ; i++) {
12839 intel_de_write(dev_priv, DPLL(pipe), dpll);
12840 intel_de_posting_read(dev_priv, DPLL(pipe));
12841 udelay(150); /* wait for warmup */
12844 intel_de_write(dev_priv, PIPECONF(pipe),
12845 PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12846 intel_de_posting_read(dev_priv, PIPECONF(pipe));
/* Confirm the pipe actually started scanning out. */
12848 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * i830_disable_pipe - force-disable a pipe on i830 (quirk workaround)
 * @dev_priv: device private
 * @pipe: pipe to disable
 *
 * Warns if any plane or cursor is still enabled, disables the pipe,
 * waits for the scanline to stop, then shuts down the DPLL.
 */
12851 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12853 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12855 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
/* All planes and cursors must already be off before pipe disable. */
12858 drm_WARN_ON(&dev_priv->drm,
12859 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12860 DISPLAY_PLANE_ENABLE);
12861 drm_WARN_ON(&dev_priv->drm,
12862 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12863 DISPLAY_PLANE_ENABLE);
12864 drm_WARN_ON(&dev_priv->drm,
12865 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12866 DISPLAY_PLANE_ENABLE);
12867 drm_WARN_ON(&dev_priv->drm,
12868 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12869 drm_WARN_ON(&dev_priv->drm,
12870 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12872 intel_de_write(dev_priv, PIPECONF(pipe), 0);
12873 intel_de_posting_read(dev_priv, PIPECONF(pipe));
12875 intel_wait_for_pipe_scanline_stopped(crtc);
/* Pipe is stopped; now the DPLL can be safely shut down. */
12877 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12878 intel_de_posting_read(dev_priv, DPLL(pipe));
/*
 * intel_sanitize_plane_mapping - fix BIOS plane/pipe cross-wiring
 * @dev_priv: device private
 *
 * On pre-gen4 the BIOS may attach a primary plane to a different pipe
 * than the one we expect; disable any such misrouted plane.
 */
12882 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12884 struct intel_crtc *crtc;
/* Gen4+ does not allow this cross-wiring; nothing to sanitize. */
12886 if (INTEL_GEN(dev_priv) >= 4)
12889 for_each_intel_crtc(&dev_priv->drm, crtc) {
12890 struct intel_plane *plane =
12891 to_intel_plane(crtc->base.primary);
12892 struct intel_crtc *plane_crtc;
12895 if (!plane->get_hw_state(plane, &pipe))
12898 if (pipe == crtc->pipe)
12901 drm_dbg_kms(&dev_priv->drm,
12902 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12903 plane->base.base.id, plane->base.name);
12905 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12906 intel_plane_disable_noatomic(plane_crtc, plane);
/*
 * Check whether any encoder is currently attached to @crtc.
 * NOTE(review): the loop-body/return lines are elided in this chunk;
 * presumably returns true on the first encoder found, false otherwise.
 */
12910 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12912 struct drm_device *dev = crtc->base.dev;
12913 struct intel_encoder *encoder;
12915 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
/*
 * Find a connector currently attached to @encoder.
 * NOTE(review): the loop-body/return lines are elided in this chunk;
 * presumably returns the first matching connector, or NULL if none.
 */
12921 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12923 struct drm_device *dev = encoder->base.dev;
12924 struct intel_connector *connector;
12926 for_each_connector_on_encoder(dev, &encoder->base, connector)
12932 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12933 enum pipe pch_transcoder)
12935 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12936 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
/*
 * intel_sanitize_frame_start_delay - clear BIOS frame start delays
 * @crtc_state: crtc state read out from hardware
 *
 * Reprograms the frame start delay fields (CPU transcoder and, when a
 * PCH encoder is present, the PCH transcoder) to our chosen
 * framestart_delay value, overriding whatever the BIOS left behind.
 */
12939 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12941 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12942 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12943 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* HSW/BDW/gen9+: delay lives in the per-transcoder CHICKEN_TRANS reg. */
12945 if (INTEL_GEN(dev_priv) >= 9 ||
12946 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12947 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
/* DSI transcoders have no CHICKEN_TRANS register to adjust. */
12950 if (transcoder_is_dsi(cpu_transcoder))
12953 val = intel_de_read(dev_priv, reg);
12954 val &= ~HSW_FRAME_START_DELAY_MASK;
12955 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12956 intel_de_write(dev_priv, reg, val);
/* Older platforms: delay field is in PIPECONF. */
12958 i915_reg_t reg = PIPECONF(cpu_transcoder);
12961 val = intel_de_read(dev_priv, reg);
12962 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12963 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12964 intel_de_write(dev_priv, reg, val);
12967 if (!crtc_state->has_pch_encoder)
/* IBX: PCH-side delay is in PCH_TRANSCONF for this pipe. */
12970 if (HAS_PCH_IBX(dev_priv)) {
12971 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12974 val = intel_de_read(dev_priv, reg);
12975 val &= ~TRANS_FRAME_START_DELAY_MASK;
12976 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12977 intel_de_write(dev_priv, reg, val);
/* CPT+: PCH-side delay is in TRANS_CHICKEN2. */
12979 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12980 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12983 val = intel_de_read(dev_priv, reg);
12984 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12985 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12986 intel_de_write(dev_priv, reg, val);
/*
 * intel_sanitize_crtc - bring a crtc into a state our driver can manage
 * @crtc: crtc to sanitize
 * @ctx: modeset acquire context
 *
 * Clears BIOS frame start delays, disables all non-primary planes and
 * any BIOS background color, turns off crtcs without active encoders,
 * and initializes FIFO underrun reporting bookkeeping.
 */
12990 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12991 struct drm_modeset_acquire_ctx *ctx)
12993 struct drm_device *dev = crtc->base.dev;
12994 struct drm_i915_private *dev_priv = to_i915(dev);
12995 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12997 if (crtc_state->hw.active) {
12998 struct intel_plane *plane;
13000 /* Clear any frame start delays used for debugging left by the BIOS */
13001 intel_sanitize_frame_start_delay(crtc_state);
13003 /* Disable everything but the primary plane */
13004 for_each_intel_plane_on_crtc(dev, crtc, plane) {
13005 const struct intel_plane_state *plane_state =
13006 to_intel_plane_state(plane->base.state);
13008 if (plane_state->uapi.visible &&
13009 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
13010 intel_plane_disable_noatomic(crtc, plane);
13014 * Disable any background color set by the BIOS, but enable the
13015 * gamma and CSC to match how we program our planes.
 */
13017 if (INTEL_GEN(dev_priv) >= 9)
13018 intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
13019 SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
13022 /* Adjust the state of the output pipe according to whether we
13023 * have active connectors/encoders. */
13024 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
13025 !crtc_state->bigjoiner_slave)
13026 intel_crtc_disable_noatomic(crtc, ctx);
13028 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
13030 * We start out with underrun reporting disabled to avoid races.
13031 * For correct bookkeeping mark this on active crtcs.
13033 * Also on gmch platforms we dont have any hardware bits to
13034 * disable the underrun reporting. Which means we need to start
13035 * out with underrun reporting disabled also on inactive pipes,
13036 * since otherwise we'll complain about the garbage we read when
13037 * e.g. coming up after runtime pm.
13039 * No protection against concurrent access is required - at
13040 * worst a fifo underrun happens which also sets this to false.
 */
13042 crtc->cpu_fifo_underrun_disabled = true;
13044 * We track the PCH trancoder underrun reporting state
13045 * within the crtc. With crtc for pipe A housing the underrun
13046 * reporting state for PCH transcoder A, crtc for pipe B housing
13047 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
13048 * and marking underrun reporting as disabled for the non-existing
13049 * PCH transcoders B and C would prevent enabling the south
13050 * error interrupt (see cpt_can_enable_serr_int()).
 */
13052 if (has_pch_trancoder(dev_priv, crtc->pipe))
13053 crtc->pch_fifo_underrun_disabled = true;
13057 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
13059 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
13062 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
13063 * the hardware when a high res displays plugged in. DPLL P
13064 * divider is zero, and the pipe timings are bonkers. We'll
13065 * try to disable everything in that case.
13067 * FIXME would be nice to be able to sanitize this state
13068 * without several WARNs, but for now let's take the easy
13071 return IS_SANDYBRIDGE(dev_priv) &&
13072 crtc_state->hw.active &&
13073 crtc_state->shared_dpll &&
13074 crtc_state->port_clock == 0;
/*
 * intel_sanitize_encoder - reconcile an encoder's state with its crtc
 * @encoder: encoder to sanitize
 *
 * If the encoder's connector is active but its crtc is not (e.g. after
 * resume register restore, or a bogus SNB BIOS DPLL config), manually
 * disables the encoder and clears the stale connector linkage, then
 * notifies opregion and fixes up DDI PLL mappings.
 */
13077 static void intel_sanitize_encoder(struct intel_encoder *encoder)
13079 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
13080 struct intel_connector *connector;
13081 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
13082 struct intel_crtc_state *crtc_state = crtc ?
13083 to_intel_crtc_state(crtc->base.state) : NULL;
13085 /* We need to check both for a crtc link (meaning that the
13086 * encoder is active and trying to read from a pipe) and the
13087 * pipe itself being active. */
13088 bool has_active_crtc = crtc_state &&
13089 crtc_state->hw.active;
13091 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
13092 drm_dbg_kms(&dev_priv->drm,
13093 "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
13094 pipe_name(crtc->pipe));
/* Treat the pipe as inactive so the teardown path below runs. */
13095 has_active_crtc = false;
13098 connector = intel_encoder_find_connector(encoder);
13099 if (connector && !has_active_crtc) {
13100 drm_dbg_kms(&dev_priv->drm,
13101 "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
13102 encoder->base.base.id,
13103 encoder->base.name);
13105 /* Connector is active, but has no active pipe. This is
13106 * fallout from our resume register restoring. Disable
13107 * the encoder manually again. */
13109 struct drm_encoder *best_encoder;
13111 drm_dbg_kms(&dev_priv->drm,
13112 "[ENCODER:%d:%s] manually disabled\n",
13113 encoder->base.base.id,
13114 encoder->base.name);
13116 /* avoid oopsing in case the hooks consult best_encoder */
13117 best_encoder = connector->base.state->best_encoder;
13118 connector->base.state->best_encoder = &encoder->base;
13120 /* FIXME NULL atomic state passed! */
13121 if (encoder->disable)
13122 encoder->disable(NULL, encoder, crtc_state,
13123 connector->base.state);
13124 if (encoder->post_disable)
13125 encoder->post_disable(NULL, encoder, crtc_state,
13126 connector->base.state);
/* Restore whatever best_encoder pointed at before the hack above. */
13128 connector->base.state->best_encoder = best_encoder;
13130 encoder->base.crtc = NULL;
13132 /* Inconsistent output/port/pipe state happens presumably due to
13133 * a bug in one of the get_hw_state functions. Or someplace else
13134 * in our code, like the register restore mess on resume. Clamp
13135 * things to off as a safer default. */
13137 connector->base.dpms = DRM_MODE_DPMS_OFF;
13138 connector->base.encoder = NULL;
13141 /* notify opregion of the sanitized encoder state */
13142 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
13144 if (HAS_DDI(dev_priv))
13145 intel_ddi_sanitize_encoder_pll_mapping(encoder);
13148 /* FIXME read out full plane state for all planes */
/*
 * readout_plane_state - read plane enable state out of the hardware
 * @dev_priv: device private
 *
 * Queries each plane's hw state, records its visibility on the crtc it
 * is actually attached to, then fixes up the per-crtc plane bitmasks.
 */
13149 static void readout_plane_state(struct drm_i915_private *dev_priv)
13151 struct intel_plane *plane;
13152 struct intel_crtc *crtc;
13154 for_each_intel_plane(&dev_priv->drm, plane) {
13155 struct intel_plane_state *plane_state =
13156 to_intel_plane_state(plane->base.state);
13157 struct intel_crtc_state *crtc_state;
/* Default pipe if get_hw_state() reports the plane disabled. */
13158 enum pipe pipe = PIPE_A;
13161 visible = plane->get_hw_state(plane, &pipe);
13163 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13164 crtc_state = to_intel_crtc_state(crtc->base.state);
13166 intel_set_plane_visible(crtc_state, plane_state, visible);
13168 drm_dbg_kms(&dev_priv->drm,
13169 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
13170 plane->base.base.id, plane->base.name,
13171 enableddisabled(visible), pipe_name(pipe));
13174 for_each_intel_crtc(&dev_priv->drm, crtc) {
13175 struct intel_crtc_state *crtc_state =
13176 to_intel_crtc_state(crtc->base.state);
13178 fixup_plane_bitmasks(crtc_state);
/*
 * intel_modeset_readout_hw_state - reconstruct software state from hw
 * @dev: drm device
 *
 * Reads the full display hardware state (crtcs, planes, dplls, encoders,
 * connectors) into the software state structures, then derives dependent
 * state: active pipe masks, cdclk/voltage minimums, bandwidth state, and
 * bigjoiner master/slave linkage.
 */
13182 static void intel_modeset_readout_hw_state(struct drm_device *dev)
13184 struct drm_i915_private *dev_priv = to_i915(dev);
13185 struct intel_cdclk_state *cdclk_state =
13186 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
13187 struct intel_dbuf_state *dbuf_state =
13188 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
13190 struct intel_crtc *crtc;
13191 struct intel_encoder *encoder;
13192 struct intel_connector *connector;
13193 struct drm_connector_list_iter conn_iter;
13194 u8 active_pipes = 0;
/* Pass 1: reset every crtc state and read its pipe config from hw. */
13196 for_each_intel_crtc(dev, crtc) {
13197 struct intel_crtc_state *crtc_state =
13198 to_intel_crtc_state(crtc->base.state);
13200 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
13201 intel_crtc_free_hw_state(crtc_state);
13202 intel_crtc_state_reset(crtc_state, crtc);
13204 intel_crtc_get_pipe_config(crtc_state);
13206 crtc_state->hw.enable = crtc_state->hw.active;
13208 crtc->base.enabled = crtc_state->hw.enable;
13209 crtc->active = crtc_state->hw.active;
13211 if (crtc_state->hw.active)
13212 active_pipes |= BIT(crtc->pipe);
13214 drm_dbg_kms(&dev_priv->drm,
13215 "[CRTC:%d:%s] hw state readout: %s\n",
13216 crtc->base.base.id, crtc->base.name,
13217 enableddisabled(crtc_state->hw.active));
/* The same active-pipes mask is shared by cdclk and dbuf state. */
13220 dev_priv->active_pipes = cdclk_state->active_pipes =
13221 dbuf_state->active_pipes = active_pipes;
13223 readout_plane_state(dev_priv);
/* Pass 2: link encoders to the crtcs driving them and read configs. */
13225 for_each_intel_encoder(dev, encoder) {
13228 if (encoder->get_hw_state(encoder, &pipe)) {
13229 struct intel_crtc_state *crtc_state;
13231 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13232 crtc_state = to_intel_crtc_state(crtc->base.state);
13234 encoder->base.crtc = &crtc->base;
13235 intel_encoder_get_config(encoder, crtc_state);
13236 if (encoder->sync_state)
13237 encoder->sync_state(encoder, crtc_state);
13239 /* read out to slave crtc as well for bigjoiner */
13240 if (crtc_state->bigjoiner) {
13241 /* encoder should read be linked to bigjoiner master */
13242 WARN_ON(crtc_state->bigjoiner_slave);
13244 crtc = crtc_state->bigjoiner_linked_crtc;
13245 crtc_state = to_intel_crtc_state(crtc->base.state);
13246 intel_encoder_get_config(encoder, crtc_state);
13249 encoder->base.crtc = NULL;
13252 drm_dbg_kms(&dev_priv->drm,
13253 "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
13254 encoder->base.base.id, encoder->base.name,
13255 enableddisabled(encoder->base.crtc),
13259 intel_dpll_readout_hw_state(dev_priv);
/* Pass 3: fix up connector <-> encoder linkage and uapi masks. */
13261 drm_connector_list_iter_begin(dev, &conn_iter);
13262 for_each_intel_connector_iter(connector, &conn_iter) {
13263 if (connector->get_hw_state(connector)) {
13264 struct intel_crtc_state *crtc_state;
13265 struct intel_crtc *crtc;
13267 connector->base.dpms = DRM_MODE_DPMS_ON;
13269 encoder = intel_attached_encoder(connector);
13270 connector->base.encoder = &encoder->base;
13272 crtc = to_intel_crtc(encoder->base.crtc);
13273 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
13275 if (crtc_state && crtc_state->hw.active) {
13277 * This has to be done during hardware readout
13278 * because anything calling .crtc_disable may
13279 * rely on the connector_mask being accurate.
 */
13281 crtc_state->uapi.connector_mask |=
13282 drm_connector_mask(&connector->base);
13283 crtc_state->uapi.encoder_mask |=
13284 drm_encoder_mask(&encoder->base);
13287 connector->base.dpms = DRM_MODE_DPMS_OFF;
13288 connector->base.encoder = NULL;
13290 drm_dbg_kms(&dev_priv->drm,
13291 "[CONNECTOR:%d:%s] hw state readout: %s\n",
13292 connector->base.base.id, connector->base.name,
13293 enableddisabled(connector->base.encoder));
13295 drm_connector_list_iter_end(&conn_iter);
/* Pass 4: derive cdclk/bandwidth state; handle bigjoiner slaves last. */
13297 for_each_intel_crtc(dev, crtc) {
13298 struct intel_bw_state *bw_state =
13299 to_intel_bw_state(dev_priv->bw_obj.state);
13300 struct intel_crtc_state *crtc_state =
13301 to_intel_crtc_state(crtc->base.state);
13302 struct intel_plane *plane;
/* Slave crtcs get their state copied from the master below. */
13305 if (crtc_state->bigjoiner_slave)
13308 if (crtc_state->hw.active) {
13310 * The initial mode needs to be set in order to keep
13311 * the atomic core happy. It wants a valid mode if the
13312 * crtc's enabled, so we do the above call.
13314 * But we don't set all the derived state fully, hence
13315 * set a flag to indicate that a full recalculation is
13316 * needed on the next commit.
 */
13318 crtc_state->inherited = true;
13320 intel_crtc_update_active_timings(crtc_state);
13322 intel_crtc_copy_hw_to_uapi_state(crtc_state);
13325 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13326 const struct intel_plane_state *plane_state =
13327 to_intel_plane_state(plane->base.state);
13330 * FIXME don't have the fb yet, so can't
13331 * use intel_plane_data_rate() :(
 */
/* Conservative worst-case estimate: 4 bytes/pixel. */
13333 if (plane_state->uapi.visible)
13334 crtc_state->data_rate[plane->id] =
13335 4 * crtc_state->pixel_rate;
13337 * FIXME don't have the fb yet, so can't
13338 * use plane->min_cdclk() :(
 */
13340 if (plane_state->uapi.visible && plane->min_cdclk) {
13341 if (crtc_state->double_wide ||
13342 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
13343 crtc_state->min_cdclk[plane->id] =
13344 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
13346 crtc_state->min_cdclk[plane->id] =
13347 crtc_state->pixel_rate;
13349 drm_dbg_kms(&dev_priv->drm,
13350 "[PLANE:%d:%s] min_cdclk %d kHz\n",
13351 plane->base.base.id, plane->base.name,
13352 crtc_state->min_cdclk[plane->id]);
13355 if (crtc_state->hw.active) {
13356 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
13357 if (drm_WARN_ON(dev, min_cdclk < 0))
13361 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13362 cdclk_state->min_voltage_level[crtc->pipe] =
13363 crtc_state->min_voltage_level;
13365 intel_bw_crtc_update(bw_state, crtc_state);
13367 intel_pipe_config_sanity_check(dev_priv, crtc_state);
13369 /* discard our incomplete slave state, copy it from master */
13370 if (crtc_state->bigjoiner && crtc_state->hw.active) {
13371 struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13372 struct intel_crtc_state *slave_crtc_state =
13373 to_intel_crtc_state(slave->base.state);
13375 copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13376 slave->base.mode = crtc->base.mode;
13378 cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13379 cdclk_state->min_voltage_level[slave->pipe] =
13380 crtc_state->min_voltage_level;
13382 for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13383 const struct intel_plane_state *plane_state =
13384 to_intel_plane_state(plane->base.state);
13387 * FIXME don't have the fb yet, so can't
13388 * use intel_plane_data_rate() :(
 */
13390 if (plane_state->uapi.visible)
13391 crtc_state->data_rate[plane->id] =
13392 4 * crtc_state->pixel_rate;
13394 crtc_state->data_rate[plane->id] = 0;
13397 intel_bw_crtc_update(bw_state, slave_crtc_state);
13398 drm_calc_timestamping_constants(&slave->base,
13399 &slave_crtc_state->hw.adjusted_mode);
/*
 * For every encoder that implements the optional ->get_power_domains()
 * hook and has a CRTC attached, invoke the hook with that CRTC's
 * committed state so the encoder can take the power domain references
 * it needs during HW state takeover.
 *
 * NOTE(review): this listing elides the guard bodies (presumably bare
 * "continue;" statements) and closing braces -- confirm against the
 * full source.
 */
13405 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13407 struct intel_encoder *encoder;
13409 for_each_intel_encoder(&dev_priv->drm, encoder) {
13410 struct intel_crtc_state *crtc_state;
/* The hook is optional; skip encoders that do not provide it. */
13412 if (!encoder->get_power_domains)
13416 * MST-primary and inactive encoders don't have a crtc state
13417 * and neither of these require any power domain references.
13419 if (!encoder->base.crtc)
13422 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13423 encoder->get_power_domains(encoder, crtc_state);
/*
 * Apply platform-specific display workarounds (WAs) that must be in
 * place early, before the initial HW state readout/sanitization runs.
 * Each branch programs chicken/clock-gating registers for the affected
 * platforms only.
 */
13427 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13430 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
13431 * Also known as Wa_14010480278.
13433 if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
13434 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13435 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS)
13437 if (IS_HASWELL(dev_priv)) {
13439 * WaRsPkgCStateDisplayPMReq:hsw
13440 * System hang if this isn't done before disabling all planes!
13442 intel_de_write(dev_priv, CHICKEN_PAR1_1,
13443 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13446 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13447 /* Display WA #1142:kbl,cfl,cml */
13448 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13449 KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13450 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13451 KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13452 KBL_ARB_FILL_SPARE_14);
/*
 * If the BIOS left a *disabled* PCH HDMI port with a non-pipe-A
 * transcoder select, rewrite the select bits to pipe A.  Ports that
 * are enabled, or that already select pipe A, are left untouched.
 * NOTE(review): the guard's body (presumably a bare "return;") and the
 * port_name() argument line are elided in this listing.
 */
13456 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13457 enum port port, i915_reg_t hdmi_reg)
13459 u32 val = intel_de_read(dev_priv, hdmi_reg);
13461 if (val & SDVO_ENABLE ||
13462 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13465 drm_dbg_kms(&dev_priv->drm,
13466 "Sanitizing transcoder select for HDMI %c\n",
/* Force the pipe/transcoder select back to pipe A. */
13469 val &= ~SDVO_PIPE_SEL_MASK;
13470 val |= SDVO_PIPE_SEL(PIPE_A);
13472 intel_de_write(dev_priv, hdmi_reg, val);
/*
 * DP twin of ibx_sanitize_pch_hdmi_port(): if the BIOS left a
 * *disabled* PCH DP port selecting a pipe other than A, rewrite the
 * pipe select bits to pipe A.  Enabled ports, or ports already on
 * pipe A, are left alone.
 * NOTE(review): the guard's body (presumably "return;") and the
 * port_name() argument line are elided in this listing.
 */
13475 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13476 enum port port, i915_reg_t dp_reg)
13478 u32 val = intel_de_read(dev_priv, dp_reg);
13480 if (val & DP_PORT_EN ||
13481 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13484 drm_dbg_kms(&dev_priv->drm,
13485 "Sanitizing transcoder select for DP %c\n",
/* Force the pipe select back to pipe A. */
13488 val &= ~DP_PIPE_SEL_MASK;
13489 val |= DP_PIPE_SEL(PIPE_A);
13491 intel_de_write(dev_priv, dp_reg, val);
/*
 * Sanitize the transcoder select bits on every PCH DP and HDMI port,
 * so that stale BIOS programming cannot trip the PCH "disabled"
 * asserts later.  See the per-port helpers above for the actual fixup.
 */
13494 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13497 * The BIOS may select transcoder B on some of the PCH
13498 * ports even it doesn't enable the port. This would trip
13499 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13500 * Sanitize the transcoder select bits to prevent that. We
13501 * assume that the BIOS never actually enabled the port,
13502 * because if it did we'd actually have to toggle the port
13503 * on and back off to make the transcoder A select stick
13504 * (see. intel_dp_link_down(), intel_disable_hdmi(),
13505 * intel_disable_sdvo()).
13507 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13508 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13509 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13511 /* PCH SDVOB multiplex with HDMIB */
13512 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13513 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13514 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
/*
 * Read out the current hardware modeset state and sanitize it so the
 * atomic core has a consistent view to commit against.  Holds an INIT
 * power domain reference for the duration (taken at entry, dropped at
 * the end).  Caller supplies a modeset acquire context.
 *
 * NOTE(review): this listing elides blank/brace lines and some
 * declarations (e.g. min_cdclk/put_domains locals) -- verify against
 * the full source before editing.
 */
13517 /* Scan out the current hw modeset state,
13518 * and sanitizes it to the current state
13521 intel_modeset_setup_hw_state(struct drm_device *dev,
13522 struct drm_modeset_acquire_ctx *ctx)
13524 struct drm_i915_private *dev_priv = to_i915(dev);
13525 struct intel_encoder *encoder;
13526 struct intel_crtc *crtc;
13527 intel_wakeref_t wakeref;
13529 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
/* Early workarounds first, then raw HW state readout. */
13531 intel_early_display_was(dev_priv);
13532 intel_modeset_readout_hw_state(dev);
13534 /* HW state is read out, now we need to sanitize this mess. */
13536 /* Sanitize the TypeC port mode upfront, encoders depend on this */
13537 for_each_intel_encoder(dev, encoder) {
13538 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
13540 /* We need to sanitize only the MST primary port. */
13541 if (encoder->type != INTEL_OUTPUT_DP_MST &&
13542 intel_phy_is_tc(dev_priv, phy))
13543 intel_tc_port_sanitize(enc_to_dig_port(encoder));
13546 get_encoder_power_domains(dev_priv);
/* IBX PCH: fix up stale BIOS transcoder selects on disabled ports. */
13548 if (HAS_PCH_IBX(dev_priv))
13549 ibx_sanitize_pch_ports(dev_priv);
13552 * intel_sanitize_plane_mapping() may need to do vblank
13553 * waits, so we need vblank interrupts restored beforehand.
13555 for_each_intel_crtc(&dev_priv->drm, crtc) {
13556 struct intel_crtc_state *crtc_state =
13557 to_intel_crtc_state(crtc->base.state);
13559 drm_crtc_vblank_reset(&crtc->base);
13561 if (crtc_state->hw.active)
13562 intel_crtc_vblank_on(crtc_state);
13565 intel_sanitize_plane_mapping(dev_priv);
13567 for_each_intel_encoder(dev, encoder)
13568 intel_sanitize_encoder(encoder);
13570 for_each_intel_crtc(&dev_priv->drm, crtc) {
13571 struct intel_crtc_state *crtc_state =
13572 to_intel_crtc_state(crtc->base.state);
13574 intel_sanitize_crtc(crtc, ctx);
13575 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
13578 intel_modeset_update_connector_atomic_state(dev);
13580 intel_dpll_sanitize_state(dev_priv);
/* Platform-specific watermark readout (sanitized where required). */
13582 if (IS_G4X(dev_priv)) {
13583 g4x_wm_get_hw_state(dev_priv);
13584 g4x_wm_sanitize(dev_priv);
13585 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
13586 vlv_wm_get_hw_state(dev_priv);
13587 vlv_wm_sanitize(dev_priv);
13588 } else if (INTEL_GEN(dev_priv) >= 9) {
13589 skl_wm_get_hw_state(dev_priv);
13590 } else if (HAS_PCH_SPLIT(dev_priv)) {
13591 ilk_wm_get_hw_state(dev_priv);
/*
 * Sanity check: after sanitization no CRTC should report leftover
 * power domains; warn and drop them if any do.
 */
13594 for_each_intel_crtc(dev, crtc) {
13595 struct intel_crtc_state *crtc_state =
13596 to_intel_crtc_state(crtc->base.state);
13599 put_domains = modeset_get_crtc_power_domains(crtc_state);
13600 if (drm_WARN_ON(dev, put_domains))
13601 modeset_put_crtc_power_domains(crtc, put_domains);
13604 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
/*
 * Restore the atomic state stashed across suspend
 * (dev_priv->modeset_restore_state) under the full set of modeset
 * locks, using the standard -EDEADLK backoff dance, then re-enable IPC
 * and report any failure.
 *
 * NOTE(review): this listing elides the "int ret;" declaration, the
 * NULL-state guards and the retry-loop construct around the lock
 * acquisition -- confirm against the full source.
 */
13607 void intel_display_resume(struct drm_device *dev)
13609 struct drm_i915_private *dev_priv = to_i915(dev);
13610 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13611 struct drm_modeset_acquire_ctx ctx;
/* Consume the stashed state so it cannot be restored twice. */
13614 dev_priv->modeset_restore_state = NULL;
13616 state->acquire_ctx = &ctx;
13618 drm_modeset_acquire_init(&ctx, 0);
/* Anything other than -EDEADLK ends the acquire attempt. */
13621 ret = drm_modeset_lock_all_ctx(dev, &ctx);
13622 if (ret != -EDEADLK)
13625 drm_modeset_backoff(&ctx);
13629 ret = __intel_display_resume(dev, state, &ctx);
13631 intel_enable_ipc(dev_priv);
13632 drm_modeset_drop_locks(&ctx);
13633 drm_modeset_acquire_fini(&ctx);
13636 drm_err(&dev_priv->drm,
13637 "Restoring old state failed with %i\n", ret);
/* Drop our reference to the restored atomic state. */
13639 drm_atomic_state_put(state);
/*
 * Cancel all per-connector work that the hotplug machinery may have
 * queued: the modeset retry work and, for HDCP-capable connectors
 * (shim present), the HDCP check/property workers.  Uses the _sync
 * cancel variants so nothing is still running on return.
 */
13642 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13644 struct intel_connector *connector;
13645 struct drm_connector_list_iter conn_iter;
13647 /* Kill all the work that may have been queued by hpd. */
13648 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13649 for_each_intel_connector_iter(connector, &conn_iter) {
/* .func is only set once the retry work was initialized. */
13650 if (connector->modeset_retry_work.func)
13651 cancel_work_sync(&connector->modeset_retry_work);
13652 if (connector->hdcp.shim) {
13653 cancel_delayed_work_sync(&connector->hdcp.check_work);
13654 cancel_work_sync(&connector->hdcp.prop_work);
13657 drm_connector_list_iter_end(&conn_iter);
13660 /* part #1: call before irq uninstall */
13661 void intel_modeset_driver_remove(struct drm_i915_private *i915)
/* Drain queued flips/modesets while interrupts are still available. */
13663 flush_workqueue(i915->flip_wq);
13664 flush_workqueue(i915->modeset_wq);
/* The atomic-helper free list must be empty once its work is flushed. */
13666 flush_work(&i915->atomic_helper.free_work);
13667 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
/*
 * Second teardown stage, run after IRQs are uninstalled.  The ordering
 * below is deliberate (polling before MST before fbdev, workqueues
 * destroyed only after everything that could queue onto them is gone)
 * -- see the inline comments.
 */
13670 /* part #2: call after irq uninstall */
13671 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
13674 * Due to the hpd irq storm handling the hotplug work can re-arm the
13675 * poll handlers. Hence disable polling after hpd handling is shut down.
13677 intel_hpd_poll_fini(i915);
13680 * MST topology needs to be suspended so we don't have any calls to
13681 * fbdev after it's finalized. MST will be destroyed later as part of
13682 * drm_mode_config_cleanup()
13684 intel_dp_mst_suspend(i915);
13686 /* poll work can call into fbdev, hence clean that up afterwards */
13687 intel_fbdev_fini(i915);
13689 intel_unregister_dsm_handler();
13691 intel_fbc_global_disable(i915);
13693 /* flush any delayed tasks or pending work */
13694 flush_scheduled_work();
13696 intel_hdcp_component_fini(i915);
13698 intel_mode_config_cleanup(i915);
13700 intel_overlay_cleanup(i915);
13702 intel_gmbus_teardown(i915);
/* Safe to destroy now: all producers for these queues are gone. */
13704 destroy_workqueue(i915->flip_wq);
13705 destroy_workqueue(i915->modeset_wq);
13707 intel_fbc_cleanup_cfb(i915);
13710 /* part #3: call after gem init */
13711 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
/* Final stage: CSR/DMC firmware, power domains, VGA client, VBT data. */
13713 intel_csr_ucode_fini(i915);
13715 intel_power_domains_driver_remove(i915);
13717 intel_vga_unregister(i915);
13719 intel_bios_driver_remove(i915);
/*
 * Register the userspace-visible display interfaces: debugfs,
 * opregion/ACPI video, audio, the async fbdev initial config and
 * finally connector polling.  Returns early (line elided in this
 * listing) when the device has no display.
 */
13722 void intel_display_driver_register(struct drm_i915_private *i915)
13724 if (!HAS_DISPLAY(i915))
13727 intel_display_debugfs_register(i915);
13729 /* Must be done after probing outputs */
13730 intel_opregion_register(i915);
13731 acpi_video_register();
13733 intel_audio_init(i915);
13736 * Some ports require correctly set-up hpd registers for
13737 * detection to work properly (leading to ghost connected
13738 * connector status), e.g. VGA on gm45. Hence we can only set
13739 * up the initial fbdev config after hpd irqs are fully
13740 * enabled. We do it last so that the async config cannot run
13741 * before the connectors are registered.
13743 intel_fbdev_initial_config_async(&i915->drm);
13746 * We need to coordinate the hotplugs with the asynchronous
13747 * fbdev configuration, for which we use the
13748 * fbdev->async_cookie.
13750 drm_kms_helper_poll_init(&i915->drm);
/*
 * Mirror of intel_display_driver_register(): tear down fbdev and audio
 * first, then flush hotplug/poll work, shut the pipes down via the
 * atomic helper, and unregister ACPI video/opregion.  No-op (early
 * return elided in this listing) when the device has no display.
 */
13753 void intel_display_driver_unregister(struct drm_i915_private *i915)
13755 if (!HAS_DISPLAY(i915))
13758 intel_fbdev_unregister(i915);
13759 intel_audio_deinit(i915);
13762 * After flushing the fbdev (incl. a late async config which
13763 * will have delayed queuing of a hotplug event), then flush
13764 * the hotplug events.
13766 drm_kms_helper_poll_fini(&i915->drm);
13767 drm_atomic_helper_shutdown(&i915->drm);
13769 acpi_video_unregister();
13770 intel_opregion_unregister(i915);
/*
 * Snapshot of key display registers taken at GPU error time, only
 * built when error capture is enabled.  Arrays are indexed by pipe
 * (or transcoder) and, per the capture code below, register fields are
 * only filled in when the corresponding power domain was on.
 *
 * NOTE(review): several field lines and the struct's closing members
 * (including the transcoder array) are elided in this listing.
 */
13773 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
13775 struct intel_display_error_state {
13777 u32 power_well_driver;
/* Per-pipe cursor register snapshot. */
13779 struct intel_cursor_error_state {
13784 } cursor[I915_MAX_PIPES];
/* Per-pipe state; registers meaningful only when power_domain_on. */
13786 struct intel_pipe_error_state {
13787 bool power_domain_on;
13790 } pipe[I915_MAX_PIPES];
/* Per-pipe primary plane register snapshot. */
13792 struct intel_plane_error_state {
13800 } plane[I915_MAX_PIPES];
/* Per-transcoder configuration/timing snapshot. */
13802 struct intel_transcoder_error_state {
13804 bool power_domain_on;
13805 enum transcoder cpu_transcoder;
/*
 * Capture the display controller registers (pipes, planes, cursors,
 * transcoders) into a freshly allocated intel_display_error_state for
 * inclusion in a GPU error dump.  Allocates with GFP_ATOMIC (caller
 * context presumably cannot sleep -- TODO confirm).  Returns NULL when
 * there is no display or the allocation fails (early-return lines
 * elided in this listing).
 */
13818 struct intel_display_error_state *
13819 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
13821 struct intel_display_error_state *error;
13822 int transcoders[] = {
13831 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
13833 if (!HAS_DISPLAY(dev_priv))
13836 error = kzalloc(sizeof(*error), GFP_ATOMIC);
/* HSW/BDW: also record the driver power well control register. */
13840 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
13841 error->power_well_driver = intel_de_read(dev_priv,
13842 HSW_PWR_WELL_CTL2);
13844 for_each_pipe(dev_priv, i) {
/* Skip pipes whose power domain is off; reads would be garbage. */
13845 error->pipe[i].power_domain_on =
13846 __intel_display_power_is_enabled(dev_priv,
13847 POWER_DOMAIN_PIPE(i));
13848 if (!error->pipe[i].power_domain_on)
13851 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
13852 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
13853 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
13855 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
13856 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
/* Register set varies by generation; guard each read accordingly. */
13857 if (INTEL_GEN(dev_priv) <= 3) {
13858 error->plane[i].size = intel_de_read(dev_priv,
13860 error->plane[i].pos = intel_de_read(dev_priv,
13863 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
13864 error->plane[i].addr = intel_de_read(dev_priv,
13866 if (INTEL_GEN(dev_priv) >= 4) {
13867 error->plane[i].surface = intel_de_read(dev_priv,
13869 error->plane[i].tile_offset = intel_de_read(dev_priv,
13873 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
13875 if (HAS_GMCH(dev_priv))
13876 error->pipe[i].stat = intel_de_read(dev_priv,
/* Transcoders: only those present and powered are captured. */
13880 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
13881 enum transcoder cpu_transcoder = transcoders[i];
13883 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
13886 error->transcoder[i].available = true;
13887 error->transcoder[i].power_domain_on =
13888 __intel_display_power_is_enabled(dev_priv,
13889 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13890 if (!error->transcoder[i].power_domain_on)
13893 error->transcoder[i].cpu_transcoder = cpu_transcoder;
13895 error->transcoder[i].conf = intel_de_read(dev_priv,
13896 PIPECONF(cpu_transcoder));
13897 error->transcoder[i].htotal = intel_de_read(dev_priv,
13898 HTOTAL(cpu_transcoder));
13899 error->transcoder[i].hblank = intel_de_read(dev_priv,
13900 HBLANK(cpu_transcoder));
13901 error->transcoder[i].hsync = intel_de_read(dev_priv,
13902 HSYNC(cpu_transcoder));
13903 error->transcoder[i].vtotal = intel_de_read(dev_priv,
13904 VTOTAL(cpu_transcoder));
13905 error->transcoder[i].vblank = intel_de_read(dev_priv,
13906 VBLANK(cpu_transcoder));
13907 error->transcoder[i].vsync = intel_de_read(dev_priv,
13908 VSYNC(cpu_transcoder));
/* Convenience wrapper: print into the i915 error state buffer. */
13914 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13917 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13918 struct intel_display_error_state *error)
13920 struct drm_i915_private *dev_priv = m->i915;
13926 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
13927 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
13928 err_printf(m, "PWR_WELL_CTL2: %08x\n",
13929 error->power_well_driver);
13930 for_each_pipe(dev_priv, i) {
13931 err_printf(m, "Pipe [%d]:\n", i);
13932 err_printf(m, " Power: %s\n",
13933 onoff(error->pipe[i].power_domain_on));
13934 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
13935 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
13937 err_printf(m, "Plane [%d]:\n", i);
13938 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
13939 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
13940 if (INTEL_GEN(dev_priv) <= 3) {
13941 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
13942 err_printf(m, " POS: %08x\n", error->plane[i].pos);
13944 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
13945 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
13946 if (INTEL_GEN(dev_priv) >= 4) {
13947 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
13948 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
13951 err_printf(m, "Cursor [%d]:\n", i);
13952 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
13953 err_printf(m, " POS: %08x\n", error->cursor[i].position);
13954 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
13957 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
13958 if (!error->transcoder[i].available)
13961 err_printf(m, "CPU transcoder: %s\n",
13962 transcoder_name(error->transcoder[i].cpu_transcoder));
13963 err_printf(m, " Power: %s\n",
13964 onoff(error->transcoder[i].power_domain_on));
13965 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
13966 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
13967 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
13968 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
13969 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
13970 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
13971 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);