2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_damage_helper.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_display_debugfs.h"
49 #include "display/intel_dp.h"
50 #include "display/intel_dp_mst.h"
51 #include "display/intel_dpll.h"
52 #include "display/intel_dpll_mgr.h"
53 #include "display/intel_dsi.h"
54 #include "display/intel_dvo.h"
55 #include "display/intel_gmbus.h"
56 #include "display/intel_hdmi.h"
57 #include "display/intel_lvds.h"
58 #include "display/intel_sdvo.h"
59 #include "display/intel_tv.h"
60 #include "display/intel_vdsc.h"
61 #include "display/intel_vrr.h"
63 #include "gem/i915_gem_object.h"
65 #include "gt/intel_rps.h"
68 #include "i915_trace.h"
69 #include "intel_acpi.h"
70 #include "intel_atomic.h"
71 #include "intel_atomic_plane.h"
73 #include "intel_cdclk.h"
74 #include "intel_color.h"
75 #include "intel_crtc.h"
76 #include "intel_csr.h"
77 #include "intel_display_types.h"
78 #include "intel_dp_link_training.h"
79 #include "intel_fbc.h"
80 #include "intel_fdi.h"
81 #include "intel_fbdev.h"
82 #include "intel_fifo_underrun.h"
83 #include "intel_frontbuffer.h"
84 #include "intel_hdcp.h"
85 #include "intel_hotplug.h"
86 #include "intel_overlay.h"
87 #include "intel_pipe_crc.h"
89 #include "intel_pps.h"
90 #include "intel_psr.h"
91 #include "intel_quirks.h"
92 #include "intel_sideband.h"
93 #include "intel_sprite.h"
95 #include "intel_vga.h"
96 #include "i9xx_plane.h"
97 #include "skl_universal_plane.h"
99 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
100 struct intel_crtc_state *pipe_config);
101 static void ilk_pch_clock_get(struct intel_crtc *crtc,
102 struct intel_crtc_state *pipe_config);
104 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
105 struct drm_i915_gem_object *obj,
106 struct drm_mode_fb_cmd2 *mode_cmd);
107 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
108 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
109 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
110 const struct intel_link_m_n *m_n,
111 const struct intel_link_m_n *m2_n2);
112 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
113 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
114 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
115 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
116 static void vlv_prepare_pll(struct intel_crtc *crtc,
117 const struct intel_crtc_state *pipe_config);
118 static void chv_prepare_pll(struct intel_crtc *crtc,
119 const struct intel_crtc_state *pipe_config);
120 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
121 static void intel_modeset_setup_hw_state(struct drm_device *dev,
122 struct drm_modeset_acquire_ctx *ctx);
124 /* returns HPLL frequency in kHz */
125 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
127 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
129 /* Obtain SKU information */
130 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
131 CCK_FUSE_HPLL_FREQ_MASK;
133 return vco_freq[hpll_freq] * 1000;
136 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
137 const char *name, u32 reg, int ref_freq)
142 val = vlv_cck_read(dev_priv, reg);
143 divider = val & CCK_FREQUENCY_VALUES;
145 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
146 (divider << CCK_FREQUENCY_STATUS_SHIFT),
147 "%s change in progress\n", name);
149 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
152 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
153 const char *name, u32 reg)
157 vlv_cck_get(dev_priv);
159 if (dev_priv->hpll_freq == 0)
160 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
162 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
164 vlv_cck_put(dev_priv);
169 static void intel_update_czclk(struct drm_i915_private *dev_priv)
171 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
174 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
175 CCK_CZ_CLOCK_CONTROL);
177 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
178 dev_priv->czclk_freq);
181 /* WA Display #0827: Gen9:all */
183 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
186 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
187 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
189 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
190 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
193 /* Wa_2006604312:icl,ehl */
195 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
199 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
200 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
202 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
203 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
207 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
209 return crtc_state->master_transcoder != INVALID_TRANSCODER;
213 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
215 return crtc_state->sync_mode_slaves_mask != 0;
219 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
221 return is_trans_port_sync_master(crtc_state) ||
222 is_trans_port_sync_slave(crtc_state);
225 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
228 i915_reg_t reg = PIPEDSL(pipe);
232 if (IS_GEN(dev_priv, 2))
233 line_mask = DSL_LINEMASK_GEN2;
235 line_mask = DSL_LINEMASK_GEN3;
237 line1 = intel_de_read(dev_priv, reg) & line_mask;
239 line2 = intel_de_read(dev_priv, reg) & line_mask;
241 return line1 != line2;
244 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
246 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
247 enum pipe pipe = crtc->pipe;
249 /* Wait for the display line to settle/start moving */
250 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
251 drm_err(&dev_priv->drm,
252 "pipe %c scanline %s wait timed out\n",
253 pipe_name(pipe), onoff(state));
256 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
258 wait_for_pipe_scanline_moving(crtc, false);
261 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
263 wait_for_pipe_scanline_moving(crtc, true);
267 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
269 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
270 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
272 if (INTEL_GEN(dev_priv) >= 4) {
273 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
274 i915_reg_t reg = PIPECONF(cpu_transcoder);
276 /* Wait for the Pipe State to go off */
277 if (intel_de_wait_for_clear(dev_priv, reg,
278 I965_PIPECONF_ACTIVE, 100))
279 drm_WARN(&dev_priv->drm, 1,
280 "pipe_off wait timed out\n");
282 intel_wait_for_pipe_scanline_stopped(crtc);
286 /* Only for pre-ILK configs */
287 void assert_pll(struct drm_i915_private *dev_priv,
288 enum pipe pipe, bool state)
293 val = intel_de_read(dev_priv, DPLL(pipe));
294 cur_state = !!(val & DPLL_VCO_ENABLE);
295 I915_STATE_WARN(cur_state != state,
296 "PLL state assertion failure (expected %s, current %s)\n",
297 onoff(state), onoff(cur_state));
300 /* XXX: the dsi pll is shared between MIPI DSI ports */
301 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
306 vlv_cck_get(dev_priv);
307 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
308 vlv_cck_put(dev_priv);
310 cur_state = val & DSI_PLL_VCO_EN;
311 I915_STATE_WARN(cur_state != state,
312 "DSI PLL state assertion failure (expected %s, current %s)\n",
313 onoff(state), onoff(cur_state));
316 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
317 enum pipe pipe, bool state)
321 if (HAS_DDI(dev_priv)) {
323 * DDI does not have a specific FDI_TX register.
325 * FDI is never fed from EDP transcoder
326 * so pipe->transcoder cast is fine here.
328 enum transcoder cpu_transcoder = (enum transcoder)pipe;
329 u32 val = intel_de_read(dev_priv,
330 TRANS_DDI_FUNC_CTL(cpu_transcoder));
331 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
333 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
334 cur_state = !!(val & FDI_TX_ENABLE);
336 I915_STATE_WARN(cur_state != state,
337 "FDI TX state assertion failure (expected %s, current %s)\n",
338 onoff(state), onoff(cur_state));
340 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
341 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
343 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
344 enum pipe pipe, bool state)
349 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
350 cur_state = !!(val & FDI_RX_ENABLE);
351 I915_STATE_WARN(cur_state != state,
352 "FDI RX state assertion failure (expected %s, current %s)\n",
353 onoff(state), onoff(cur_state));
355 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
356 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
358 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
363 /* ILK FDI PLL is always enabled */
364 if (IS_GEN(dev_priv, 5))
367 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
368 if (HAS_DDI(dev_priv))
371 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
372 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
375 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
376 enum pipe pipe, bool state)
381 val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
382 cur_state = !!(val & FDI_RX_PLL_ENABLE);
383 I915_STATE_WARN(cur_state != state,
384 "FDI RX PLL assertion failure (expected %s, current %s)\n",
385 onoff(state), onoff(cur_state));
388 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
392 enum pipe panel_pipe = INVALID_PIPE;
395 if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
398 if (HAS_PCH_SPLIT(dev_priv)) {
401 pp_reg = PP_CONTROL(0);
402 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
405 case PANEL_PORT_SELECT_LVDS:
406 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
408 case PANEL_PORT_SELECT_DPA:
409 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
411 case PANEL_PORT_SELECT_DPC:
412 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
414 case PANEL_PORT_SELECT_DPD:
415 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
418 MISSING_CASE(port_sel);
421 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
422 /* presumably write lock depends on pipe, not port select */
423 pp_reg = PP_CONTROL(pipe);
428 pp_reg = PP_CONTROL(0);
429 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
431 drm_WARN_ON(&dev_priv->drm,
432 port_sel != PANEL_PORT_SELECT_LVDS);
433 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
436 val = intel_de_read(dev_priv, pp_reg);
437 if (!(val & PANEL_POWER_ON) ||
438 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
441 I915_STATE_WARN(panel_pipe == pipe && locked,
442 "panel assertion failure, pipe %c regs locked\n",
446 void assert_pipe(struct drm_i915_private *dev_priv,
447 enum transcoder cpu_transcoder, bool state)
450 enum intel_display_power_domain power_domain;
451 intel_wakeref_t wakeref;
453 /* we keep both pipes enabled on 830 */
454 if (IS_I830(dev_priv))
457 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
458 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
460 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
461 cur_state = !!(val & PIPECONF_ENABLE);
463 intel_display_power_put(dev_priv, power_domain, wakeref);
468 I915_STATE_WARN(cur_state != state,
469 "transcoder %s assertion failure (expected %s, current %s)\n",
470 transcoder_name(cpu_transcoder),
471 onoff(state), onoff(cur_state));
474 static void assert_plane(struct intel_plane *plane, bool state)
479 cur_state = plane->get_hw_state(plane, &pipe);
481 I915_STATE_WARN(cur_state != state,
482 "%s assertion failure (expected %s, current %s)\n",
483 plane->base.name, onoff(state), onoff(cur_state));
486 #define assert_plane_enabled(p) assert_plane(p, true)
487 #define assert_plane_disabled(p) assert_plane(p, false)
489 static void assert_planes_disabled(struct intel_crtc *crtc)
491 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
492 struct intel_plane *plane;
494 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
495 assert_plane_disabled(plane);
498 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
504 val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
505 enabled = !!(val & TRANS_ENABLE);
506 I915_STATE_WARN(enabled,
507 "transcoder assertion failed, should be off on pipe %c but is still active\n",
511 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
512 enum pipe pipe, enum port port,
518 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
520 I915_STATE_WARN(state && port_pipe == pipe,
521 "PCH DP %c enabled on transcoder %c, should be disabled\n",
522 port_name(port), pipe_name(pipe));
524 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
525 "IBX PCH DP %c still using transcoder B\n",
529 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
530 enum pipe pipe, enum port port,
536 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
538 I915_STATE_WARN(state && port_pipe == pipe,
539 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
540 port_name(port), pipe_name(pipe));
542 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
543 "IBX PCH HDMI %c still using transcoder B\n",
547 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
552 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
553 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
554 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
556 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
558 "PCH VGA enabled on transcoder %c, should be disabled\n",
561 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
563 "PCH LVDS enabled on transcoder %c, should be disabled\n",
566 /* PCH SDVOB multiplex with HDMIB */
567 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
568 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
569 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
572 static void _vlv_enable_pll(struct intel_crtc *crtc,
573 const struct intel_crtc_state *pipe_config)
575 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
576 enum pipe pipe = crtc->pipe;
578 intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
579 intel_de_posting_read(dev_priv, DPLL(pipe));
582 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
583 drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
586 static void vlv_enable_pll(struct intel_crtc *crtc,
587 const struct intel_crtc_state *pipe_config)
589 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
590 enum pipe pipe = crtc->pipe;
592 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
594 /* PLL is protected by panel, make sure we can write it */
595 assert_panel_unlocked(dev_priv, pipe);
597 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
598 _vlv_enable_pll(crtc, pipe_config);
600 intel_de_write(dev_priv, DPLL_MD(pipe),
601 pipe_config->dpll_hw_state.dpll_md);
602 intel_de_posting_read(dev_priv, DPLL_MD(pipe));
606 static void _chv_enable_pll(struct intel_crtc *crtc,
607 const struct intel_crtc_state *pipe_config)
609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
610 enum pipe pipe = crtc->pipe;
611 enum dpio_channel port = vlv_pipe_to_channel(pipe);
614 vlv_dpio_get(dev_priv);
616 /* Enable back the 10bit clock to display controller */
617 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
618 tmp |= DPIO_DCLKP_EN;
619 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
621 vlv_dpio_put(dev_priv);
624 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
629 intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
631 /* Check PLL is locked */
632 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
633 drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
636 static void chv_enable_pll(struct intel_crtc *crtc,
637 const struct intel_crtc_state *pipe_config)
639 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
640 enum pipe pipe = crtc->pipe;
642 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
644 /* PLL is protected by panel, make sure we can write it */
645 assert_panel_unlocked(dev_priv, pipe);
647 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
648 _chv_enable_pll(crtc, pipe_config);
650 if (pipe != PIPE_A) {
652 * WaPixelRepeatModeFixForC0:chv
654 * DPLLCMD is AWOL. Use chicken bits to propagate
655 * the value from DPLLBMD to either pipe B or C.
657 intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
658 intel_de_write(dev_priv, DPLL_MD(PIPE_B),
659 pipe_config->dpll_hw_state.dpll_md);
660 intel_de_write(dev_priv, CBR4_VLV, 0);
661 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
664 * DPLLB VGA mode also seems to cause problems.
665 * We should always have it disabled.
667 drm_WARN_ON(&dev_priv->drm,
668 (intel_de_read(dev_priv, DPLL(PIPE_B)) &
669 DPLL_VGA_MODE_DIS) == 0);
671 intel_de_write(dev_priv, DPLL_MD(pipe),
672 pipe_config->dpll_hw_state.dpll_md);
673 intel_de_posting_read(dev_priv, DPLL_MD(pipe));
677 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
679 if (IS_I830(dev_priv))
682 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
685 static void i9xx_enable_pll(struct intel_crtc *crtc,
686 const struct intel_crtc_state *crtc_state)
688 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
689 i915_reg_t reg = DPLL(crtc->pipe);
690 u32 dpll = crtc_state->dpll_hw_state.dpll;
693 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
695 /* PLL is protected by panel, make sure we can write it */
696 if (i9xx_has_pps(dev_priv))
697 assert_panel_unlocked(dev_priv, crtc->pipe);
700 * Apparently we need to have VGA mode enabled prior to changing
701 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
702 * dividers, even though the register value does change.
704 intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
705 intel_de_write(dev_priv, reg, dpll);
707 /* Wait for the clocks to stabilize. */
708 intel_de_posting_read(dev_priv, reg);
711 if (INTEL_GEN(dev_priv) >= 4) {
712 intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
713 crtc_state->dpll_hw_state.dpll_md);
715 /* The pixel multiplier can only be updated once the
716 * DPLL is enabled and the clocks are stable.
720 intel_de_write(dev_priv, reg, dpll);
723 /* We do this three times for luck */
724 for (i = 0; i < 3; i++) {
725 intel_de_write(dev_priv, reg, dpll);
726 intel_de_posting_read(dev_priv, reg);
727 udelay(150); /* wait for warmup */
731 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
733 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
734 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
735 enum pipe pipe = crtc->pipe;
737 /* Don't disable pipe or pipe PLLs if needed */
738 if (IS_I830(dev_priv))
741 /* Make sure the pipe isn't still relying on us */
742 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
744 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
745 intel_de_posting_read(dev_priv, DPLL(pipe));
748 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
752 /* Make sure the pipe isn't still relying on us */
753 assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
755 val = DPLL_INTEGRATED_REF_CLK_VLV |
756 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
758 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
760 intel_de_write(dev_priv, DPLL(pipe), val);
761 intel_de_posting_read(dev_priv, DPLL(pipe));
764 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
766 enum dpio_channel port = vlv_pipe_to_channel(pipe);
769 /* Make sure the pipe isn't still relying on us */
770 assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
772 val = DPLL_SSC_REF_CLK_CHV |
773 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
775 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
777 intel_de_write(dev_priv, DPLL(pipe), val);
778 intel_de_posting_read(dev_priv, DPLL(pipe));
780 vlv_dpio_get(dev_priv);
782 /* Disable 10bit clock to display controller */
783 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
784 val &= ~DPIO_DCLKP_EN;
785 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
787 vlv_dpio_put(dev_priv);
790 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
791 struct intel_digital_port *dig_port,
792 unsigned int expected_mask)
797 switch (dig_port->base.port) {
799 port_mask = DPLL_PORTB_READY_MASK;
803 port_mask = DPLL_PORTC_READY_MASK;
808 port_mask = DPLL_PORTD_READY_MASK;
809 dpll_reg = DPIO_PHY_STATUS;
815 if (intel_de_wait_for_register(dev_priv, dpll_reg,
816 port_mask, expected_mask, 1000))
817 drm_WARN(&dev_priv->drm, 1,
818 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
819 dig_port->base.base.base.id, dig_port->base.base.name,
820 intel_de_read(dev_priv, dpll_reg) & port_mask,
824 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
826 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
827 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
828 enum pipe pipe = crtc->pipe;
830 u32 val, pipeconf_val;
832 /* Make sure PCH DPLL is enabled */
833 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
835 /* FDI must be feeding us bits for PCH ports */
836 assert_fdi_tx_enabled(dev_priv, pipe);
837 assert_fdi_rx_enabled(dev_priv, pipe);
839 if (HAS_PCH_CPT(dev_priv)) {
840 reg = TRANS_CHICKEN2(pipe);
841 val = intel_de_read(dev_priv, reg);
843 * Workaround: Set the timing override bit
844 * before enabling the pch transcoder.
846 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
847 /* Configure frame start delay to match the CPU */
848 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
849 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
850 intel_de_write(dev_priv, reg, val);
853 reg = PCH_TRANSCONF(pipe);
854 val = intel_de_read(dev_priv, reg);
855 pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
857 if (HAS_PCH_IBX(dev_priv)) {
858 /* Configure frame start delay to match the CPU */
859 val &= ~TRANS_FRAME_START_DELAY_MASK;
860 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
863 * Make the BPC in transcoder be consistent with
864 * that in pipeconf reg. For HDMI we must use 8bpc
865 * here for both 8bpc and 12bpc.
867 val &= ~PIPECONF_BPC_MASK;
868 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
869 val |= PIPECONF_8BPC;
871 val |= pipeconf_val & PIPECONF_BPC_MASK;
874 val &= ~TRANS_INTERLACE_MASK;
875 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
876 if (HAS_PCH_IBX(dev_priv) &&
877 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
878 val |= TRANS_LEGACY_INTERLACED_ILK;
880 val |= TRANS_INTERLACED;
882 val |= TRANS_PROGRESSIVE;
885 intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
886 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
887 drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
891 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
892 enum transcoder cpu_transcoder)
894 u32 val, pipeconf_val;
896 /* FDI must be feeding us bits for PCH ports */
897 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
898 assert_fdi_rx_enabled(dev_priv, PIPE_A);
900 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
901 /* Workaround: set timing override bit. */
902 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
903 /* Configure frame start delay to match the CPU */
904 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
905 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
906 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
909 pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
911 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
912 PIPECONF_INTERLACED_ILK)
913 val |= TRANS_INTERLACED;
915 val |= TRANS_PROGRESSIVE;
917 intel_de_write(dev_priv, LPT_TRANSCONF, val);
918 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
919 TRANS_STATE_ENABLE, 100))
920 drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
923 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
929 /* FDI relies on the transcoder */
930 assert_fdi_tx_disabled(dev_priv, pipe);
931 assert_fdi_rx_disabled(dev_priv, pipe);
933 /* Ports must be off as well */
934 assert_pch_ports_disabled(dev_priv, pipe);
936 reg = PCH_TRANSCONF(pipe);
937 val = intel_de_read(dev_priv, reg);
938 val &= ~TRANS_ENABLE;
939 intel_de_write(dev_priv, reg, val);
940 /* wait for PCH transcoder off, transcoder state */
941 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
942 drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
945 if (HAS_PCH_CPT(dev_priv)) {
946 /* Workaround: Clear the timing override chicken bit again. */
947 reg = TRANS_CHICKEN2(pipe);
948 val = intel_de_read(dev_priv, reg);
949 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
950 intel_de_write(dev_priv, reg, val);
954 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
958 val = intel_de_read(dev_priv, LPT_TRANSCONF);
959 val &= ~TRANS_ENABLE;
960 intel_de_write(dev_priv, LPT_TRANSCONF, val);
961 /* wait for PCH transcoder off, transcoder state */
962 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
963 TRANS_STATE_ENABLE, 50))
964 drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
966 /* Workaround: clear timing override bit. */
967 val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
968 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
969 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
972 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
974 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
976 if (HAS_PCH_LPT(dev_priv))
982 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
984 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
985 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
986 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
987 enum pipe pipe = crtc->pipe;
991 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
993 assert_planes_disabled(crtc);
996 * A pipe without a PLL won't actually be able to drive bits from
997 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1000 if (HAS_GMCH(dev_priv)) {
1001 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1002 assert_dsi_pll_enabled(dev_priv);
1004 assert_pll_enabled(dev_priv, pipe);
1006 if (new_crtc_state->has_pch_encoder) {
1007 /* if driving the PCH, we need FDI enabled */
1008 assert_fdi_rx_pll_enabled(dev_priv,
1009 intel_crtc_pch_transcoder(crtc));
1010 assert_fdi_tx_pll_enabled(dev_priv,
1011 (enum pipe) cpu_transcoder);
1013 /* FIXME: assert CPU port conditions for SNB+ */
1016 trace_intel_pipe_enable(crtc);
1018 reg = PIPECONF(cpu_transcoder);
1019 val = intel_de_read(dev_priv, reg);
1020 if (val & PIPECONF_ENABLE) {
1021 /* we keep both pipes enabled on 830 */
1022 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
1026 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
1027 intel_de_posting_read(dev_priv, reg);
1030 * Until the pipe starts PIPEDSL reads will return a stale value,
1031 * which causes an apparent vblank timestamp jump when PIPEDSL
1032 * resets to its proper value. That also messes up the frame count
1033 * when it's derived from the timestamps. So let's wait for the
1034 * pipe to start properly before we call drm_crtc_vblank_on()
1036 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1037 intel_wait_for_pipe_scanline_moving(crtc);
1040 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1042 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1043 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1044 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1045 enum pipe pipe = crtc->pipe;
1049 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
1052 * Make sure planes won't keep trying to pump pixels to us,
1053 * or we might hang the display.
1055 assert_planes_disabled(crtc);
1057 trace_intel_pipe_disable(crtc);
1059 reg = PIPECONF(cpu_transcoder);
1060 val = intel_de_read(dev_priv, reg);
1061 if ((val & PIPECONF_ENABLE) == 0)
1065 * Double wide has implications for planes
1066 * so best keep it disabled when not needed.
1068 if (old_crtc_state->double_wide)
1069 val &= ~PIPECONF_DOUBLE_WIDE;
1071 /* Don't disable pipe or pipe PLLs if needed */
1072 if (!IS_I830(dev_priv))
1073 val &= ~PIPECONF_ENABLE;
1075 intel_de_write(dev_priv, reg, val);
1076 if ((val & PIPECONF_ENABLE) == 0)
1077 intel_wait_for_pipe_off(old_crtc_state);
1080 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1082 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1085 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1087 if (is_ccs_modifier(fb->modifier))
1088 return is_ccs_plane(fb, plane);
1094 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1097 return info->is_yuv &&
1098 info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1101 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
1104 return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
1109 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1111 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1112 unsigned int cpp = fb->format->cpp[color_plane];
1114 switch (fb->modifier) {
1115 case DRM_FORMAT_MOD_LINEAR:
1116 return intel_tile_size(dev_priv);
1117 case I915_FORMAT_MOD_X_TILED:
1118 if (IS_GEN(dev_priv, 2))
1122 case I915_FORMAT_MOD_Y_TILED_CCS:
1123 if (is_ccs_plane(fb, color_plane))
1126 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1127 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1128 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1129 if (is_ccs_plane(fb, color_plane))
1132 case I915_FORMAT_MOD_Y_TILED:
1133 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1137 case I915_FORMAT_MOD_Yf_TILED_CCS:
1138 if (is_ccs_plane(fb, color_plane))
1141 case I915_FORMAT_MOD_Yf_TILED:
1157 MISSING_CASE(fb->modifier);
1163 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1165 if (is_gen12_ccs_plane(fb, color_plane))
1168 return intel_tile_size(to_i915(fb->dev)) /
1169 intel_tile_width_bytes(fb, color_plane);
1172 /* Return the tile dimensions in pixel units */
1173 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1174 unsigned int *tile_width,
1175 unsigned int *tile_height)
1177 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1178 unsigned int cpp = fb->format->cpp[color_plane];
1180 *tile_width = tile_width_bytes / cpp;
1181 *tile_height = intel_tile_height(fb, color_plane);
1184 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
1187 unsigned int tile_width, tile_height;
1189 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
1191 return fb->pitches[color_plane] * tile_height;
1195 intel_fb_align_height(const struct drm_framebuffer *fb,
1196 int color_plane, unsigned int height)
1198 unsigned int tile_height = intel_tile_height(fb, color_plane);
1200 return ALIGN(height, tile_height);
1203 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1205 unsigned int size = 0;
1208 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1209 size += rot_info->plane[i].width * rot_info->plane[i].height;
1214 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1216 unsigned int size = 0;
1219 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1220 size += rem_info->plane[i].width * rem_info->plane[i].height;
1226 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1227 const struct drm_framebuffer *fb,
1228 unsigned int rotation)
1230 view->type = I915_GGTT_VIEW_NORMAL;
1231 if (drm_rotation_90_or_270(rotation)) {
1232 view->type = I915_GGTT_VIEW_ROTATED;
1233 view->rotated = to_intel_framebuffer(fb)->rot_info;
1237 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
1239 if (IS_I830(dev_priv))
1241 else if (IS_I85X(dev_priv))
1243 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
1249 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
1251 if (INTEL_GEN(dev_priv) >= 9)
1253 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
1254 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1256 else if (INTEL_GEN(dev_priv) >= 4)
1262 static bool has_async_flips(struct drm_i915_private *i915)
1264 return INTEL_GEN(i915) >= 5;
1267 unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
1270 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1272 /* AUX_DIST needs only 4K alignment */
1273 if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
1274 is_ccs_plane(fb, color_plane))
1277 switch (fb->modifier) {
1278 case DRM_FORMAT_MOD_LINEAR:
1279 return intel_linear_alignment(dev_priv);
1280 case I915_FORMAT_MOD_X_TILED:
1281 if (has_async_flips(dev_priv))
1284 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1285 if (is_semiplanar_uv_plane(fb, color_plane))
1286 return intel_tile_row_size(fb, color_plane);
1288 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1289 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1291 case I915_FORMAT_MOD_Y_TILED_CCS:
1292 case I915_FORMAT_MOD_Yf_TILED_CCS:
1293 case I915_FORMAT_MOD_Y_TILED:
1294 if (INTEL_GEN(dev_priv) >= 12 &&
1295 is_semiplanar_uv_plane(fb, color_plane))
1296 return intel_tile_row_size(fb, color_plane);
1298 case I915_FORMAT_MOD_Yf_TILED:
1299 return 1 * 1024 * 1024;
1301 MISSING_CASE(fb->modifier);
1306 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1308 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1309 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1311 return INTEL_GEN(dev_priv) < 4 ||
1313 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
1317 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1318 const struct i915_ggtt_view *view,
1320 unsigned long *out_flags)
1322 struct drm_device *dev = fb->dev;
1323 struct drm_i915_private *dev_priv = to_i915(dev);
1324 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1325 intel_wakeref_t wakeref;
1326 struct i915_vma *vma;
1327 unsigned int pinctl;
1330 if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1331 return ERR_PTR(-EINVAL);
1333 alignment = intel_surf_alignment(fb, 0);
1334 if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1335 return ERR_PTR(-EINVAL);
1337 /* Note that the w/a also requires 64 PTE of padding following the
1338 * bo. We currently fill all unused PTE with the shadow page and so
1339 * we should always have valid PTE following the scanout preventing
1342 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1343 alignment = 256 * 1024;
1346 * Global gtt pte registers are special registers which actually forward
1347 * writes to a chunk of system memory. Which means that there is no risk
1348 * that the register values disappear as soon as we call
1349 * intel_runtime_pm_put(), so it is correct to wrap only the
1350 * pin/unpin/fence and not more.
1352 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1354 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1357 * Valleyview is definitely limited to scanning out the first
1358 * 512MiB. Lets presume this behaviour was inherited from the
1359 * g4x display engine and that all earlier gen are similarly
1360 * limited. Testing suggests that it is a little more
1361 * complicated than this. For example, Cherryview appears quite
1362 * happy to scanout from anywhere within its global aperture.
1365 if (HAS_GMCH(dev_priv))
1366 pinctl |= PIN_MAPPABLE;
1368 vma = i915_gem_object_pin_to_display_plane(obj,
1369 alignment, view, pinctl);
1373 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1377 * Install a fence for tiled scan-out. Pre-i965 always needs a
1378 * fence, whereas 965+ only requires a fence if using
1379 * framebuffer compression. For simplicity, we always, when
1380 * possible, install a fence as the cost is not that onerous.
1382 * If we fail to fence the tiled scanout, then either the
1383 * modeset will reject the change (which is highly unlikely as
1384 * the affected systems, all but one, do not have unmappable
1385 * space) or we will not be able to enable full powersaving
1386 * techniques (also likely not to apply due to various limits
1387 * FBC and the like impose on the size of the buffer, which
1388 * presumably we violated anyway with this unmappable buffer).
1389 * Anyway, it is presumably better to stumble onwards with
1390 * something and try to run the system in a "less than optimal"
1391 * mode that matches the user configuration.
1393 ret = i915_vma_pin_fence(vma);
1394 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
1395 i915_gem_object_unpin_from_display_plane(vma);
1400 if (ret == 0 && vma->fence)
1401 *out_flags |= PLANE_HAS_FENCE;
1406 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1407 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1411 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1413 i915_gem_object_lock(vma->obj, NULL);
1414 if (flags & PLANE_HAS_FENCE)
1415 i915_vma_unpin_fence(vma);
1416 i915_gem_object_unpin_from_display_plane(vma);
1417 i915_gem_object_unlock(vma->obj);
1422 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
1423 unsigned int rotation)
1425 if (drm_rotation_90_or_270(rotation))
1426 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
1428 return fb->pitches[color_plane];
1432 * Convert the x/y offsets into a linear offset.
1433 * Only valid with 0/180 degree rotation, which is fine since linear
1434 * offset is only used with linear buffers on pre-hsw and tiled buffers
1435 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1437 u32 intel_fb_xy_to_linear(int x, int y,
1438 const struct intel_plane_state *state,
1441 const struct drm_framebuffer *fb = state->hw.fb;
1442 unsigned int cpp = fb->format->cpp[color_plane];
1443 unsigned int pitch = state->color_plane[color_plane].stride;
1445 return y * pitch + x * cpp;
1449 * Add the x/y offsets derived from fb->offsets[] to the user
1450 * specified plane src x/y offsets. The resulting x/y offsets
1451 * specify the start of scanout from the beginning of the gtt mapping.
1453 void intel_add_fb_offsets(int *x, int *y,
1454 const struct intel_plane_state *state,
1458 *x += state->color_plane[color_plane].x;
1459 *y += state->color_plane[color_plane].y;
1462 static u32 intel_adjust_tile_offset(int *x, int *y,
1463 unsigned int tile_width,
1464 unsigned int tile_height,
1465 unsigned int tile_size,
1466 unsigned int pitch_tiles,
1470 unsigned int pitch_pixels = pitch_tiles * tile_width;
1473 WARN_ON(old_offset & (tile_size - 1));
1474 WARN_ON(new_offset & (tile_size - 1));
1475 WARN_ON(new_offset > old_offset);
1477 tiles = (old_offset - new_offset) / tile_size;
1479 *y += tiles / pitch_tiles * tile_height;
1480 *x += tiles % pitch_tiles * tile_width;
1482 /* minimize x in case it got needlessly big */
1483 *y += *x / pitch_pixels * tile_height;
1489 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
1491 return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
1492 is_gen12_ccs_plane(fb, color_plane);
1495 static u32 intel_adjust_aligned_offset(int *x, int *y,
1496 const struct drm_framebuffer *fb,
1498 unsigned int rotation,
1500 u32 old_offset, u32 new_offset)
1502 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1503 unsigned int cpp = fb->format->cpp[color_plane];
1505 drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
1507 if (!is_surface_linear(fb, color_plane)) {
1508 unsigned int tile_size, tile_width, tile_height;
1509 unsigned int pitch_tiles;
1511 tile_size = intel_tile_size(dev_priv);
1512 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
1514 if (drm_rotation_90_or_270(rotation)) {
1515 pitch_tiles = pitch / tile_height;
1516 swap(tile_width, tile_height);
1518 pitch_tiles = pitch / (tile_width * cpp);
1521 intel_adjust_tile_offset(x, y, tile_width, tile_height,
1522 tile_size, pitch_tiles,
1523 old_offset, new_offset);
1525 old_offset += *y * pitch + *x * cpp;
1527 *y = (old_offset - new_offset) / pitch;
1528 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
1535 * Adjust the tile offset by moving the difference into
1538 u32 intel_plane_adjust_aligned_offset(int *x, int *y,
1539 const struct intel_plane_state *state,
1541 u32 old_offset, u32 new_offset)
1543 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
1545 state->color_plane[color_plane].stride,
1546 old_offset, new_offset);
1550 * Computes the aligned offset to the base tile and adjusts
1551 * x, y. bytes per pixel is assumed to be a power-of-two.
1553 * In the 90/270 rotated case, x and y are assumed
1554 * to be already rotated to match the rotated GTT view, and
1555 * pitch is the tile_height aligned framebuffer height.
1557 * This function is used when computing the derived information
1558 * under intel_framebuffer, so using any of that information
1559 * here is not allowed. Anything under drm_framebuffer can be
1560 * used. This is why the user has to pass in the pitch since it
1561 * is specified in the rotated orientation.
1563 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
1565 const struct drm_framebuffer *fb,
1568 unsigned int rotation,
1571 unsigned int cpp = fb->format->cpp[color_plane];
1572 u32 offset, offset_aligned;
1574 if (!is_surface_linear(fb, color_plane)) {
1575 unsigned int tile_size, tile_width, tile_height;
1576 unsigned int tile_rows, tiles, pitch_tiles;
1578 tile_size = intel_tile_size(dev_priv);
1579 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
1581 if (drm_rotation_90_or_270(rotation)) {
1582 pitch_tiles = pitch / tile_height;
1583 swap(tile_width, tile_height);
1585 pitch_tiles = pitch / (tile_width * cpp);
1588 tile_rows = *y / tile_height;
1591 tiles = *x / tile_width;
1594 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
1596 offset_aligned = offset;
1598 offset_aligned = rounddown(offset_aligned, alignment);
1600 intel_adjust_tile_offset(x, y, tile_width, tile_height,
1601 tile_size, pitch_tiles,
1602 offset, offset_aligned);
1604 offset = *y * pitch + *x * cpp;
1605 offset_aligned = offset;
1607 offset_aligned = rounddown(offset_aligned, alignment);
1608 *y = (offset % alignment) / pitch;
1609 *x = ((offset % alignment) - *y * pitch) / cpp;
1615 return offset_aligned;
1618 u32 intel_plane_compute_aligned_offset(int *x, int *y,
1619 const struct intel_plane_state *state,
1622 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
1623 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
1624 const struct drm_framebuffer *fb = state->hw.fb;
1625 unsigned int rotation = state->hw.rotation;
1626 int pitch = state->color_plane[color_plane].stride;
1629 if (intel_plane->id == PLANE_CURSOR)
1630 alignment = intel_cursor_alignment(dev_priv);
1632 alignment = intel_surf_alignment(fb, color_plane);
1634 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
1635 pitch, rotation, alignment);
1638 /* Convert the fb->offset[] into x/y offsets */
1639 static int intel_fb_offset_to_xy(int *x, int *y,
1640 const struct drm_framebuffer *fb,
1643 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1644 unsigned int height;
1647 if (INTEL_GEN(dev_priv) >= 12 &&
1648 is_semiplanar_uv_plane(fb, color_plane))
1649 alignment = intel_tile_row_size(fb, color_plane);
1650 else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
1651 alignment = intel_tile_size(dev_priv);
1655 if (alignment != 0 && fb->offsets[color_plane] % alignment) {
1656 drm_dbg_kms(&dev_priv->drm,
1657 "Misaligned offset 0x%08x for color plane %d\n",
1658 fb->offsets[color_plane], color_plane);
1662 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
1663 height = ALIGN(height, intel_tile_height(fb, color_plane));
1665 /* Catch potential overflows early */
1666 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
1667 fb->offsets[color_plane])) {
1668 drm_dbg_kms(&dev_priv->drm,
1669 "Bad offset 0x%08x or pitch %d for color plane %d\n",
1670 fb->offsets[color_plane], fb->pitches[color_plane],
1678 intel_adjust_aligned_offset(x, y,
1679 fb, color_plane, DRM_MODE_ROTATE_0,
1680 fb->pitches[color_plane],
1681 fb->offsets[color_plane], 0);
1686 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1688 switch (fb_modifier) {
1689 case I915_FORMAT_MOD_X_TILED:
1690 return I915_TILING_X;
1691 case I915_FORMAT_MOD_Y_TILED:
1692 case I915_FORMAT_MOD_Y_TILED_CCS:
1693 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1694 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1695 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1696 return I915_TILING_Y;
1698 return I915_TILING_NONE;
1703 * From the Sky Lake PRM:
1704 * "The Color Control Surface (CCS) contains the compression status of
1705 * the cache-line pairs. The compression state of the cache-line pair
1706 * is specified by 2 bits in the CCS. Each CCS cache-line represents
1707 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1708 * cache-line-pairs. CCS is always Y tiled."
1710 * Since cache line pairs refers to horizontally adjacent cache lines,
1711 * each cache line in the CCS corresponds to an area of 32x16 cache
1712 * lines on the main surface. Since each pixel is 4 bytes, this gives
1713 * us a ratio of one byte in the CCS for each 8x16 pixels in the
1716 static const struct drm_format_info skl_ccs_formats[] = {
1717 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1718 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1719 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1720 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1721 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1722 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1723 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1724 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1728 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1729 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1730 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1731 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1734 static const struct drm_format_info gen12_ccs_formats[] = {
1735 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1736 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1737 .hsub = 1, .vsub = 1, },
1738 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1739 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1740 .hsub = 1, .vsub = 1, },
1741 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1742 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1743 .hsub = 1, .vsub = 1, .has_alpha = true },
1744 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1745 .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1746 .hsub = 1, .vsub = 1, .has_alpha = true },
1747 { .format = DRM_FORMAT_YUYV, .num_planes = 2,
1748 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1749 .hsub = 2, .vsub = 1, .is_yuv = true },
1750 { .format = DRM_FORMAT_YVYU, .num_planes = 2,
1751 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1752 .hsub = 2, .vsub = 1, .is_yuv = true },
1753 { .format = DRM_FORMAT_UYVY, .num_planes = 2,
1754 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1755 .hsub = 2, .vsub = 1, .is_yuv = true },
1756 { .format = DRM_FORMAT_VYUY, .num_planes = 2,
1757 .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1758 .hsub = 2, .vsub = 1, .is_yuv = true },
1759 { .format = DRM_FORMAT_NV12, .num_planes = 4,
1760 .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1761 .hsub = 2, .vsub = 2, .is_yuv = true },
1762 { .format = DRM_FORMAT_P010, .num_planes = 4,
1763 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1764 .hsub = 2, .vsub = 2, .is_yuv = true },
1765 { .format = DRM_FORMAT_P012, .num_planes = 4,
1766 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1767 .hsub = 2, .vsub = 2, .is_yuv = true },
1768 { .format = DRM_FORMAT_P016, .num_planes = 4,
1769 .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1770 .hsub = 2, .vsub = 2, .is_yuv = true },
1774 * Same as gen12_ccs_formats[] above, but with additional surface used
1775 * to pass Clear Color information in plane 2 with 64 bits of data.
1777 static const struct drm_format_info gen12_ccs_cc_formats[] = {
1778 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
1779 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1780 .hsub = 1, .vsub = 1, },
1781 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
1782 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1783 .hsub = 1, .vsub = 1, },
1784 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
1785 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1786 .hsub = 1, .vsub = 1, .has_alpha = true },
1787 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
1788 .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1789 .hsub = 1, .vsub = 1, .has_alpha = true },
1792 static const struct drm_format_info *
1793 lookup_format_info(const struct drm_format_info formats[],
1794 int num_formats, u32 format)
1798 for (i = 0; i < num_formats; i++) {
1799 if (formats[i].format == format)
1806 static const struct drm_format_info *
1807 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1809 switch (cmd->modifier[0]) {
1810 case I915_FORMAT_MOD_Y_TILED_CCS:
1811 case I915_FORMAT_MOD_Yf_TILED_CCS:
1812 return lookup_format_info(skl_ccs_formats,
1813 ARRAY_SIZE(skl_ccs_formats),
1815 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1816 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1817 return lookup_format_info(gen12_ccs_formats,
1818 ARRAY_SIZE(gen12_ccs_formats),
1820 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1821 return lookup_format_info(gen12_ccs_cc_formats,
1822 ARRAY_SIZE(gen12_ccs_cc_formats),
1829 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1831 return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1835 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1836 u32 pixel_format, u64 modifier)
1838 struct intel_crtc *crtc;
1839 struct intel_plane *plane;
1842 * We assume the primary plane for pipe A has
1843 * the highest stride limits of them all,
1844 * if in case pipe A is disabled, use the first pipe from pipe_mask.
1846 crtc = intel_get_first_crtc(dev_priv);
1850 plane = to_intel_plane(crtc->base.primary);
1852 return plane->max_stride(plane, pixel_format, modifier,
1857 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1858 u32 pixel_format, u64 modifier)
1861 * Arbitrary limit for gen4+ chosen to match the
1862 * render engine max stride.
1864 * The new CCS hash mode makes remapping impossible
1866 if (!is_ccs_modifier(modifier)) {
1867 if (INTEL_GEN(dev_priv) >= 7)
1869 else if (INTEL_GEN(dev_priv) >= 4)
1873 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1877 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1879 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1882 if (is_surface_linear(fb, color_plane)) {
1883 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1888 * To make remapping with linear generally feasible
1889 * we need the stride to be page aligned.
1891 if (fb->pitches[color_plane] > max_stride &&
1892 !is_ccs_modifier(fb->modifier))
1893 return intel_tile_size(dev_priv);
1898 tile_width = intel_tile_width_bytes(fb, color_plane);
1899 if (is_ccs_modifier(fb->modifier)) {
1901 * Display WA #0531: skl,bxt,kbl,glk
1903 * Render decompression and plane width > 3840
1904 * combined with horizontal panning requires the
1905 * plane stride to be a multiple of 4. We'll just
1906 * require the entire fb to accommodate that to avoid
1907 * potential runtime errors at plane configuration time.
1909 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
1912 * The main surface pitch must be padded to a multiple of four
1915 else if (INTEL_GEN(dev_priv) >= 12)
1921 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
1923 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1924 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1925 const struct drm_framebuffer *fb = plane_state->hw.fb;
1928 /* We don't want to deal with remapping with cursors */
1929 if (plane->id == PLANE_CURSOR)
1933 * The display engine limits already match/exceed the
1934 * render engine limits, so not much point in remapping.
1935 * Would also need to deal with the fence POT alignment
1936 * and gen2 2KiB GTT tile size.
1938 if (INTEL_GEN(dev_priv) < 4)
1942 * The new CCS hash mode isn't compatible with remapping as
1943 * the virtual address of the pages affects the compressed data.
1945 if (is_ccs_modifier(fb->modifier))
1948 /* Linear needs a page aligned stride for remapping */
1949 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
1950 unsigned int alignment = intel_tile_size(dev_priv) - 1;
1952 for (i = 0; i < fb->format->num_planes; i++) {
1953 if (fb->pitches[i] & alignment)
1961 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
1963 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1964 const struct drm_framebuffer *fb = plane_state->hw.fb;
1965 unsigned int rotation = plane_state->hw.rotation;
1966 u32 stride, max_stride;
1969 * No remapping for invisible planes since we don't have
1970 * an actual source viewport to remap.
1972 if (!plane_state->uapi.visible)
1975 if (!intel_plane_can_remap(plane_state))
1979 * FIXME: aux plane limits on gen9+ are
1980 * unclear in Bspec, for now no checking.
1982 stride = intel_fb_pitch(fb, 0, rotation);
1983 max_stride = plane->max_stride(plane, fb->format->format,
1984 fb->modifier, rotation);
1986 return stride > max_stride;
1990 intel_fb_plane_get_subsampling(int *hsub, int *vsub,
1991 const struct drm_framebuffer *fb,
1996 if (color_plane == 0) {
2004 * TODO: Deduct the subsampling from the char block for all CCS
2005 * formats and planes.
2007 if (!is_gen12_ccs_plane(fb, color_plane)) {
2008 *hsub = fb->format->hsub;
2009 *vsub = fb->format->vsub;
2014 main_plane = skl_ccs_to_main_plane(fb, color_plane);
2015 *hsub = drm_format_info_block_width(fb->format, color_plane) /
2016 drm_format_info_block_width(fb->format, main_plane);
2019 * The min stride check in the core framebuffer_check() function
2020 * assumes that format->hsub applies to every plane except for the
2021 * first plane. That's incorrect for the CCS AUX plane of the first
2022 * plane, but for the above check to pass we must define the block
2023 * width with that subsampling applied to it. Adjust the width here
2024 * accordingly, so we can calculate the actual subsampling factor.
2026 if (main_plane == 0)
2027 *hsub *= fb->format->hsub;
2032 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
2034 struct drm_i915_private *i915 = to_i915(fb->dev);
2035 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2038 int tile_width, tile_height;
2042 if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
2045 intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
2046 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
2049 tile_height *= vsub;
2051 ccs_x = (x * hsub) % tile_width;
2052 ccs_y = (y * vsub) % tile_height;
2054 main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
2055 main_x = intel_fb->normal[main_plane].x % tile_width;
2056 main_y = intel_fb->normal[main_plane].y % tile_height;
2059 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2060 * x/y offsets must match between CCS and the main surface.
2062 if (main_x != ccs_x || main_y != ccs_y) {
2063 drm_dbg_kms(&i915->drm,
2064 "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2067 intel_fb->normal[main_plane].x,
2068 intel_fb->normal[main_plane].y,
2077 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2079 int main_plane = is_ccs_plane(fb, color_plane) ?
2080 skl_ccs_to_main_plane(fb, color_plane) : 0;
2081 int main_hsub, main_vsub;
2084 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2085 intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2086 *w = fb->width / main_hsub / hsub;
2087 *h = fb->height / main_vsub / vsub;
2091 * Setup the rotated view for an FB plane and return the size the GTT mapping
2092 * requires for this view.
2095 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
2096 u32 gtt_offset_rotated, int x, int y,
2097 unsigned int width, unsigned int height,
2098 unsigned int tile_size,
2099 unsigned int tile_width, unsigned int tile_height,
2100 struct drm_framebuffer *fb)
2102 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2103 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2104 unsigned int pitch_tiles;
2107 /* Y or Yf modifiers required for 90/270 rotation */
2108 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
2109 fb->modifier != I915_FORMAT_MOD_Yf_TILED)
2112 if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
2115 rot_info->plane[plane] = *plane_info;
2117 intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
2119 /* rotate the x/y offsets to match the GTT view */
2120 drm_rect_init(&r, x, y, width, height);
2122 plane_info->width * tile_width,
2123 plane_info->height * tile_height,
2124 DRM_MODE_ROTATE_270);
2128 /* rotate the tile dimensions to match the GTT view */
2129 pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
2130 swap(tile_width, tile_height);
2133 * We only keep the x/y offsets, so push all of the
2134 * gtt offset into the x/y offsets.
2136 intel_adjust_tile_offset(&x, &y,
2137 tile_width, tile_height,
2138 tile_size, pitch_tiles,
2139 gtt_offset_rotated * tile_size, 0);
2142 * First pixel of the framebuffer from
2143 * the start of the rotated gtt mapping.
2145 intel_fb->rotated[plane].x = x;
2146 intel_fb->rotated[plane].y = y;
2148 return plane_info->width * plane_info->height;
2152 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2153 struct drm_framebuffer *fb)
2155 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2156 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2157 u32 gtt_offset_rotated = 0;
2158 unsigned int max_size = 0;
2159 int i, num_planes = fb->format->num_planes;
2160 unsigned int tile_size = intel_tile_size(dev_priv);
2162 for (i = 0; i < num_planes; i++) {
2163 unsigned int width, height;
2164 unsigned int cpp, size;
2170 * Plane 2 of Render Compression with Clear Color fb modifier
2171 * is consumed by the driver and not passed to DE. Skip the
2172 * arithmetic related to alignment and offset calculation.
2174 if (is_gen12_ccs_cc_plane(fb, i)) {
2175 if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
2181 cpp = fb->format->cpp[i];
2182 intel_fb_plane_dims(&width, &height, fb, i);
2184 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2186 drm_dbg_kms(&dev_priv->drm,
2187 "bad fb plane %d offset: 0x%x\n",
2192 ret = intel_fb_check_ccs_xy(fb, i, x, y);
2197 * The fence (if used) is aligned to the start of the object
2198 * so having the framebuffer wrap around across the edge of the
2199 * fenced region doesn't really work. We have no API to configure
2200 * the fence start offset within the object (nor could we probably
2201 * on gen2/3). So it's just easier if we just require that the
2202 * fb layout agrees with the fence layout. We already check that the
2203 * fb stride matches the fence stride elsewhere.
2205 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2206 (x + width) * cpp > fb->pitches[i]) {
2207 drm_dbg_kms(&dev_priv->drm,
2208 "bad fb plane %d offset: 0x%x\n",
2214 * First pixel of the framebuffer from
2215 * the start of the normal gtt mapping.
2217 intel_fb->normal[i].x = x;
2218 intel_fb->normal[i].y = y;
2220 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2224 offset /= tile_size;
2226 if (!is_surface_linear(fb, i)) {
2227 struct intel_remapped_plane_info plane_info;
2228 unsigned int tile_width, tile_height;
2230 intel_tile_dims(fb, i, &tile_width, &tile_height);
2232 plane_info.offset = offset;
2233 plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
2235 plane_info.width = DIV_ROUND_UP(x + width, tile_width);
2236 plane_info.height = DIV_ROUND_UP(y + height,
2239 /* how many tiles does this plane need */
2240 size = plane_info.stride * plane_info.height;
2242 * If the plane isn't horizontally tile aligned,
2243 * we need one more tile.
2248 gtt_offset_rotated +=
2249 setup_fb_rotation(i, &plane_info,
2251 x, y, width, height,
2253 tile_width, tile_height,
2256 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2257 x * cpp, tile_size);
2260 /* how many tiles in total needed in the bo */
2261 max_size = max(max_size, offset + size);
2264 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2265 drm_dbg_kms(&dev_priv->drm,
2266 "fb too big for bo (need %llu bytes, have %zu bytes)\n",
2267 mul_u32_u32(max_size, tile_size), obj->base.size);
2275 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2277 struct drm_i915_private *dev_priv =
2278 to_i915(plane_state->uapi.plane->dev);
2279 struct drm_framebuffer *fb = plane_state->hw.fb;
2280 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2281 struct intel_rotation_info *info = &plane_state->view.rotated;
2282 unsigned int rotation = plane_state->hw.rotation;
2283 int i, num_planes = fb->format->num_planes;
2284 unsigned int tile_size = intel_tile_size(dev_priv);
2285 unsigned int src_x, src_y;
2286 unsigned int src_w, src_h;
2289 memset(&plane_state->view, 0, sizeof(plane_state->view));
2290 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2291 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2293 src_x = plane_state->uapi.src.x1 >> 16;
2294 src_y = plane_state->uapi.src.y1 >> 16;
2295 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
2296 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
2298 drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
2300 /* Make src coordinates relative to the viewport */
2301 drm_rect_translate(&plane_state->uapi.src,
2302 -(src_x << 16), -(src_y << 16));
2304 /* Rotate src coordinates to match rotated GTT view */
2305 if (drm_rotation_90_or_270(rotation))
2306 drm_rect_rotate(&plane_state->uapi.src,
2307 src_w << 16, src_h << 16,
2308 DRM_MODE_ROTATE_270);
2310 for (i = 0; i < num_planes; i++) {
2311 unsigned int hsub = i ? fb->format->hsub : 1;
2312 unsigned int vsub = i ? fb->format->vsub : 1;
2313 unsigned int cpp = fb->format->cpp[i];
2314 unsigned int tile_width, tile_height;
2315 unsigned int width, height;
2316 unsigned int pitch_tiles;
2320 intel_tile_dims(fb, i, &tile_width, &tile_height);
2324 width = src_w / hsub;
2325 height = src_h / vsub;
2328 * First pixel of the src viewport from the
2329 * start of the normal gtt mapping.
2331 x += intel_fb->normal[i].x;
2332 y += intel_fb->normal[i].y;
2334 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2335 fb, i, fb->pitches[i],
2336 DRM_MODE_ROTATE_0, tile_size);
2337 offset /= tile_size;
2339 drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
2340 info->plane[i].offset = offset;
2341 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2343 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2344 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2346 if (drm_rotation_90_or_270(rotation)) {
2349 /* rotate the x/y offsets to match the GTT view */
2350 drm_rect_init(&r, x, y, width, height);
2352 info->plane[i].width * tile_width,
2353 info->plane[i].height * tile_height,
2354 DRM_MODE_ROTATE_270);
2358 pitch_tiles = info->plane[i].height;
2359 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2361 /* rotate the tile dimensions to match the GTT view */
2362 swap(tile_width, tile_height);
2364 pitch_tiles = info->plane[i].width;
2365 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2369 * We only keep the x/y offsets, so push all of the
2370 * gtt offset into the x/y offsets.
2372 intel_adjust_tile_offset(&x, &y,
2373 tile_width, tile_height,
2374 tile_size, pitch_tiles,
2375 gtt_offset * tile_size, 0);
2377 gtt_offset += info->plane[i].width * info->plane[i].height;
2379 plane_state->color_plane[i].offset = 0;
2380 plane_state->color_plane[i].x = x;
2381 plane_state->color_plane[i].y = y;
2386 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2388 const struct intel_framebuffer *fb =
2389 to_intel_framebuffer(plane_state->hw.fb);
2390 unsigned int rotation = plane_state->hw.rotation;
2396 num_planes = fb->base.format->num_planes;
2398 if (intel_plane_needs_remap(plane_state)) {
2399 intel_plane_remap_gtt(plane_state);
2402 * Sometimes even remapping can't overcome
2403 * the stride limitations :( Can happen with
2404 * big plane sizes and suitably misaligned
2407 return intel_plane_check_stride(plane_state);
2410 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2412 for (i = 0; i < num_planes; i++) {
2413 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2414 plane_state->color_plane[i].offset = 0;
2416 if (drm_rotation_90_or_270(rotation)) {
2417 plane_state->color_plane[i].x = fb->rotated[i].x;
2418 plane_state->color_plane[i].y = fb->rotated[i].y;
2420 plane_state->color_plane[i].x = fb->normal[i].x;
2421 plane_state->color_plane[i].y = fb->normal[i].y;
2425 /* Rotate src coordinates to match rotated GTT view */
2426 if (drm_rotation_90_or_270(rotation))
2427 drm_rect_rotate(&plane_state->uapi.src,
2428 fb->base.width << 16, fb->base.height << 16,
2429 DRM_MODE_ROTATE_270);
2431 return intel_plane_check_stride(plane_state);
2434 static int i9xx_format_to_fourcc(int format)
2437 case DISPPLANE_8BPP:
2438 return DRM_FORMAT_C8;
2439 case DISPPLANE_BGRA555:
2440 return DRM_FORMAT_ARGB1555;
2441 case DISPPLANE_BGRX555:
2442 return DRM_FORMAT_XRGB1555;
2443 case DISPPLANE_BGRX565:
2444 return DRM_FORMAT_RGB565;
2446 case DISPPLANE_BGRX888:
2447 return DRM_FORMAT_XRGB8888;
2448 case DISPPLANE_RGBX888:
2449 return DRM_FORMAT_XBGR8888;
2450 case DISPPLANE_BGRA888:
2451 return DRM_FORMAT_ARGB8888;
2452 case DISPPLANE_RGBA888:
2453 return DRM_FORMAT_ABGR8888;
2454 case DISPPLANE_BGRX101010:
2455 return DRM_FORMAT_XRGB2101010;
2456 case DISPPLANE_RGBX101010:
2457 return DRM_FORMAT_XBGR2101010;
2458 case DISPPLANE_BGRA101010:
2459 return DRM_FORMAT_ARGB2101010;
2460 case DISPPLANE_RGBA101010:
2461 return DRM_FORMAT_ABGR2101010;
2462 case DISPPLANE_RGBX161616:
2463 return DRM_FORMAT_XBGR16161616F;
2467 static struct i915_vma *
2468 initial_plane_vma(struct drm_i915_private *i915,
2469 struct intel_initial_plane_config *plane_config)
2471 struct drm_i915_gem_object *obj;
2472 struct i915_vma *vma;
2475 if (plane_config->size == 0)
2478 base = round_down(plane_config->base,
2479 I915_GTT_MIN_ALIGNMENT);
2480 size = round_up(plane_config->base + plane_config->size,
2481 I915_GTT_MIN_ALIGNMENT);
2485 * If the FB is too big, just don't use it since fbdev is not very
2486 * important and we should probably use that space with FBC or other
2489 if (size * 2 > i915->stolen_usable_size)
2492 obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
2497 * Mark it WT ahead of time to avoid changing the
2498 * cache_level during fbdev initialization. The
2499 * unbind there would get stuck waiting for rcu.
2501 i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
2502 I915_CACHE_WT : I915_CACHE_NONE);
2504 switch (plane_config->tiling) {
2505 case I915_TILING_NONE:
2509 obj->tiling_and_stride =
2510 plane_config->fb->base.pitches[0] |
2511 plane_config->tiling;
2514 MISSING_CASE(plane_config->tiling);
2518 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
2522 if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
2525 if (i915_gem_object_is_tiled(obj) &&
2526 !i915_vma_is_map_and_fenceable(vma))
2532 i915_gem_object_put(obj);
2537 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2538 struct intel_initial_plane_config *plane_config)
2540 struct drm_device *dev = crtc->base.dev;
2541 struct drm_i915_private *dev_priv = to_i915(dev);
2542 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2543 struct drm_framebuffer *fb = &plane_config->fb->base;
2544 struct i915_vma *vma;
2546 switch (fb->modifier) {
2547 case DRM_FORMAT_MOD_LINEAR:
2548 case I915_FORMAT_MOD_X_TILED:
2549 case I915_FORMAT_MOD_Y_TILED:
2552 drm_dbg(&dev_priv->drm,
2553 "Unsupported modifier for initial FB: 0x%llx\n",
2558 vma = initial_plane_vma(dev_priv, plane_config);
2562 mode_cmd.pixel_format = fb->format->format;
2563 mode_cmd.width = fb->width;
2564 mode_cmd.height = fb->height;
2565 mode_cmd.pitches[0] = fb->pitches[0];
2566 mode_cmd.modifier[0] = fb->modifier;
2567 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2569 if (intel_framebuffer_init(to_intel_framebuffer(fb),
2570 vma->obj, &mode_cmd)) {
2571 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
2575 plane_config->vma = vma;
2584 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2585 struct intel_plane_state *plane_state,
2588 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2590 plane_state->uapi.visible = visible;
2593 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
2595 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
2598 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
2600 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2601 struct drm_plane *plane;
2604 * Active_planes aliases if multiple "primary" or cursor planes
2605 * have been used on the same (or wrong) pipe. plane_mask uses
2606 * unique ids, hence we can use that to reconstruct active_planes.
2608 crtc_state->enabled_planes = 0;
2609 crtc_state->active_planes = 0;
2611 drm_for_each_plane_mask(plane, &dev_priv->drm,
2612 crtc_state->uapi.plane_mask) {
2613 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
2614 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2618 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2619 struct intel_plane *plane)
2621 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2622 struct intel_crtc_state *crtc_state =
2623 to_intel_crtc_state(crtc->base.state);
2624 struct intel_plane_state *plane_state =
2625 to_intel_plane_state(plane->base.state);
2627 drm_dbg_kms(&dev_priv->drm,
2628 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2629 plane->base.base.id, plane->base.name,
2630 crtc->base.base.id, crtc->base.name);
2632 intel_set_plane_visible(crtc_state, plane_state, false);
2633 fixup_plane_bitmasks(crtc_state);
2634 crtc_state->data_rate[plane->id] = 0;
2635 crtc_state->min_cdclk[plane->id] = 0;
2637 if (plane->id == PLANE_PRIMARY)
2638 hsw_disable_ips(crtc_state);
2641 * Vblank time updates from the shadow to live plane control register
2642 * are blocked if the memory self-refresh mode is active at that
2643 * moment. So to make sure the plane gets truly disabled, disable
2644 * first the self-refresh mode. The self-refresh enable bit in turn
2645 * will be checked/applied by the HW only at the next frame start
2646 * event which is after the vblank start event, so we need to have a
2647 * wait-for-vblank between disabling the plane and the pipe.
2649 if (HAS_GMCH(dev_priv) &&
2650 intel_set_memory_cxsr(dev_priv, false))
2651 intel_wait_for_vblank(dev_priv, crtc->pipe);
2654 * Gen2 reports pipe underruns whenever all planes are disabled.
2655 * So disable underrun reporting before all the planes get disabled.
2657 if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
2658 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
2660 intel_disable_plane(plane, crtc_state);
2664 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2665 struct intel_initial_plane_config *plane_config)
2667 struct drm_device *dev = intel_crtc->base.dev;
2668 struct drm_i915_private *dev_priv = to_i915(dev);
2670 struct drm_plane *primary = intel_crtc->base.primary;
2671 struct drm_plane_state *plane_state = primary->state;
2672 struct intel_plane *intel_plane = to_intel_plane(primary);
2673 struct intel_plane_state *intel_state =
2674 to_intel_plane_state(plane_state);
2675 struct intel_crtc_state *crtc_state =
2676 to_intel_crtc_state(intel_crtc->base.state);
2677 struct drm_framebuffer *fb;
2678 struct i915_vma *vma;
2680 if (!plane_config->fb)
2683 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2684 fb = &plane_config->fb->base;
2685 vma = plane_config->vma;
2690 * Failed to alloc the obj, check to see if we should share
2691 * an fb with another CRTC instead
2693 for_each_crtc(dev, c) {
2694 struct intel_plane_state *state;
2696 if (c == &intel_crtc->base)
2699 if (!to_intel_crtc_state(c->state)->uapi.active)
2702 state = to_intel_plane_state(c->primary->state);
2706 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2714 * We've failed to reconstruct the BIOS FB. Current display state
2715 * indicates that the primary plane is visible, but has a NULL FB,
2716 * which will lead to problems later if we don't fix it up. The
2717 * simplest solution is to just disable the primary plane now and
2718 * pretend the BIOS never had it enabled.
2720 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2721 if (crtc_state->bigjoiner) {
2722 struct intel_crtc *slave =
2723 crtc_state->bigjoiner_linked_crtc;
2724 intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
2730 intel_state->hw.rotation = plane_config->rotation;
2731 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2732 intel_state->hw.rotation);
2733 intel_state->color_plane[0].stride =
2734 intel_fb_pitch(fb, 0, intel_state->hw.rotation);
2736 __i915_vma_pin(vma);
2737 intel_state->vma = i915_vma_get(vma);
2738 if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
2740 intel_state->flags |= PLANE_HAS_FENCE;
2742 plane_state->src_x = 0;
2743 plane_state->src_y = 0;
2744 plane_state->src_w = fb->width << 16;
2745 plane_state->src_h = fb->height << 16;
2747 plane_state->crtc_x = 0;
2748 plane_state->crtc_y = 0;
2749 plane_state->crtc_w = fb->width;
2750 plane_state->crtc_h = fb->height;
2752 intel_state->uapi.src = drm_plane_state_src(plane_state);
2753 intel_state->uapi.dst = drm_plane_state_dest(plane_state);
2755 if (plane_config->tiling)
2756 dev_priv->preserve_bios_swizzle = true;
2758 plane_state->fb = fb;
2759 drm_framebuffer_get(fb);
2761 plane_state->crtc = &intel_crtc->base;
2762 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
2765 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
2767 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2768 &to_intel_frontbuffer(fb)->bits);
2772 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2776 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2777 plane_state->color_plane[0].offset, 0);
2782 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2784 struct drm_device *dev = intel_crtc->base.dev;
2785 struct drm_i915_private *dev_priv = to_i915(dev);
2786 unsigned long irqflags;
2788 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
2790 intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2791 intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2792 intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2794 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
2798 * This function detaches (aka. unbinds) unused scalers in hardware
2800 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
2802 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
2803 const struct intel_crtc_scaler_state *scaler_state =
2804 &crtc_state->scaler_state;
2807 /* loop through and disable scalers that aren't in use */
2808 for (i = 0; i < intel_crtc->num_scalers; i++) {
2809 if (!scaler_state->scalers[i].in_use)
2810 skl_detach_scaler(intel_crtc, i);
2815 __intel_display_resume(struct drm_device *dev,
2816 struct drm_atomic_state *state,
2817 struct drm_modeset_acquire_ctx *ctx)
2819 struct drm_crtc_state *crtc_state;
2820 struct drm_crtc *crtc;
2823 intel_modeset_setup_hw_state(dev, ctx);
2824 intel_vga_redisable(to_i915(dev));
2830 * We've duplicated the state, pointers to the old state are invalid.
2832 * Don't attempt to use the old state until we commit the duplicated state.
2834 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2836 * Force recalculation even if we restore
2837 * current state. With fast modeset this may not result
2838 * in a modeset when the state is compatible.
2840 crtc_state->mode_changed = true;
2843 /* ignore any reset values/BIOS leftovers in the WM registers */
2844 if (!HAS_GMCH(to_i915(dev)))
2845 to_intel_atomic_state(state)->skip_intermediate_wm = true;
2847 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
2849 drm_WARN_ON(dev, ret == -EDEADLK);
2853 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2855 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2856 intel_has_gpu_reset(&dev_priv->gt));
2859 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
2861 struct drm_device *dev = &dev_priv->drm;
2862 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2863 struct drm_atomic_state *state;
2866 if (!HAS_DISPLAY(dev_priv))
2869 /* reset doesn't touch the display */
2870 if (!dev_priv->params.force_reset_modeset_test &&
2871 !gpu_reset_clobbers_display(dev_priv))
2874 /* We have a modeset vs reset deadlock, defensively unbreak it. */
2875 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2876 smp_mb__after_atomic();
2877 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
2879 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
2880 drm_dbg_kms(&dev_priv->drm,
2881 "Modeset potentially stuck, unbreaking through wedging\n");
2882 intel_gt_set_wedged(&dev_priv->gt);
2886 * Need mode_config.mutex so that we don't
2887 * trample ongoing ->detect() and whatnot.
2889 mutex_lock(&dev->mode_config.mutex);
2890 drm_modeset_acquire_init(ctx, 0);
2892 ret = drm_modeset_lock_all_ctx(dev, ctx);
2893 if (ret != -EDEADLK)
2896 drm_modeset_backoff(ctx);
2899 * Disabling the crtcs gracefully seems nicer. Also the
2900 * g33 docs say we should at least disable all the planes.
2902 state = drm_atomic_helper_duplicate_state(dev, ctx);
2903 if (IS_ERR(state)) {
2904 ret = PTR_ERR(state);
2905 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
2910 ret = drm_atomic_helper_disable_all(dev, ctx);
2912 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2914 drm_atomic_state_put(state);
2918 dev_priv->modeset_restore_state = state;
2919 state->acquire_ctx = ctx;
2922 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
2924 struct drm_device *dev = &dev_priv->drm;
2925 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
2926 struct drm_atomic_state *state;
2929 if (!HAS_DISPLAY(dev_priv))
2932 /* reset doesn't touch the display */
2933 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
2936 state = fetch_and_zero(&dev_priv->modeset_restore_state);
2940 /* reset doesn't touch the display */
2941 if (!gpu_reset_clobbers_display(dev_priv)) {
2942 /* for testing only restore the display */
2943 ret = __intel_display_resume(dev, state, ctx);
2945 drm_err(&dev_priv->drm,
2946 "Restoring old state failed with %i\n", ret);
2949 * The display has been reset as well,
2950 * so need a full re-initialization.
2952 intel_pps_unlock_regs_wa(dev_priv);
2953 intel_modeset_init_hw(dev_priv);
2954 intel_init_clock_gating(dev_priv);
2955 intel_hpd_init(dev_priv);
2957 ret = __intel_display_resume(dev, state, ctx);
2959 drm_err(&dev_priv->drm,
2960 "Restoring old state failed with %i\n", ret);
2962 intel_hpd_poll_disable(dev_priv);
2965 drm_atomic_state_put(state);
2967 drm_modeset_drop_locks(ctx);
2968 drm_modeset_acquire_fini(ctx);
2969 mutex_unlock(&dev->mode_config.mutex);
2971 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
2974 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
2976 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2977 enum pipe pipe = crtc->pipe;
2980 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2983 * Display WA #1153: icl
2984 * enable hardware to bypass the alpha math
2985 * and rounding for per-pixel values 00 and 0xff
2987 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2989 * Display WA # 1605353570: icl
2990 * Set the pixel rounding bit to 1 for allowing
2991 * passthrough of Frame buffer pixels unmodified
2994 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2995 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2998 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
3000 struct drm_crtc *crtc;
3003 drm_for_each_crtc(crtc, &dev_priv->drm) {
3004 struct drm_crtc_commit *commit;
3005 spin_lock(&crtc->commit_lock);
3006 commit = list_first_entry_or_null(&crtc->commit_list,
3007 struct drm_crtc_commit, commit_entry);
3008 cleanup_done = commit ?
3009 try_wait_for_completion(&commit->cleanup_done) : true;
3010 spin_unlock(&crtc->commit_lock);
3015 drm_crtc_wait_one_vblank(crtc);
3023 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3027 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
3029 mutex_lock(&dev_priv->sb_lock);
3031 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3032 temp |= SBI_SSCCTL_DISABLE;
3033 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3035 mutex_unlock(&dev_priv->sb_lock);
3038 /* Program iCLKIP clock to the desired frequency */
3039 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
3041 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3042 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3043 int clock = crtc_state->hw.adjusted_mode.crtc_clock;
3044 u32 divsel, phaseinc, auxdiv, phasedir = 0;
3047 lpt_disable_iclkip(dev_priv);
3049 /* The iCLK virtual clock root frequency is in MHz,
3050 * but the adjusted_mode->crtc_clock in in KHz. To get the
3051 * divisors, it is necessary to divide one by another, so we
3052 * convert the virtual clock precision to KHz here for higher
3055 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
3056 u32 iclk_virtual_root_freq = 172800 * 1000;
3057 u32 iclk_pi_range = 64;
3058 u32 desired_divisor;
3060 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3062 divsel = (desired_divisor / iclk_pi_range) - 2;
3063 phaseinc = desired_divisor % iclk_pi_range;
3066 * Near 20MHz is a corner case which is
3067 * out of range for the 7-bit divisor
3073 /* This should not happen with any sane values */
3074 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3075 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3076 drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
3077 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3079 drm_dbg_kms(&dev_priv->drm,
3080 "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3081 clock, auxdiv, divsel, phasedir, phaseinc);
3083 mutex_lock(&dev_priv->sb_lock);
3085 /* Program SSCDIVINTPHASE6 */
3086 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3087 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3088 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3089 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3090 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3091 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3092 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3093 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3095 /* Program SSCAUXDIV */
3096 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3097 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3098 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3099 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3101 /* Enable modulator and associated divider */
3102 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3103 temp &= ~SBI_SSCCTL_DISABLE;
3104 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3106 mutex_unlock(&dev_priv->sb_lock);
3108 /* Wait for initialization time */
3111 intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3114 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
3116 u32 divsel, phaseinc, auxdiv;
3117 u32 iclk_virtual_root_freq = 172800 * 1000;
3118 u32 iclk_pi_range = 64;
3119 u32 desired_divisor;
3122 if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
3125 mutex_lock(&dev_priv->sb_lock);
3127 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3128 if (temp & SBI_SSCCTL_DISABLE) {
3129 mutex_unlock(&dev_priv->sb_lock);
3133 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3134 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
3135 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
3136 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
3137 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
3139 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3140 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
3141 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
3143 mutex_unlock(&dev_priv->sb_lock);
3145 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
3147 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3148 desired_divisor << auxdiv);
3151 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
3152 enum pipe pch_transcoder)
3154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3156 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3158 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
3159 intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
3160 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
3161 intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
3162 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
3163 intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
3165 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
3166 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
3167 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
3168 intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
3169 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
3170 intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
3171 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3172 intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
3175 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
3179 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
3180 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
3183 drm_WARN_ON(&dev_priv->drm,
3184 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
3186 drm_WARN_ON(&dev_priv->drm,
3187 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
3190 temp &= ~FDI_BC_BIFURCATION_SELECT;
3192 temp |= FDI_BC_BIFURCATION_SELECT;
3194 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
3195 enable ? "en" : "dis");
3196 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
3197 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
3200 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
3202 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3203 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3205 switch (crtc->pipe) {
3209 if (crtc_state->fdi_lanes > 2)
3210 cpt_set_fdi_bc_bifurcation(dev_priv, false);
3212 cpt_set_fdi_bc_bifurcation(dev_priv, true);
3216 cpt_set_fdi_bc_bifurcation(dev_priv, true);
3225 * Finds the encoder associated with the given CRTC. This can only be
3226 * used when we know that the CRTC isn't feeding multiple encoders!
3228 struct intel_encoder *
3229 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
3230 const struct intel_crtc_state *crtc_state)
3232 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3233 const struct drm_connector_state *connector_state;
3234 const struct drm_connector *connector;
3235 struct intel_encoder *encoder = NULL;
3236 int num_encoders = 0;
3239 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
3240 if (connector_state->crtc != &crtc->base)
3243 encoder = to_intel_encoder(connector_state->best_encoder);
3247 drm_WARN(encoder->base.dev, num_encoders != 1,
3248 "%d encoders for pipe %c\n",
3249 num_encoders, pipe_name(crtc->pipe));
3255 * Enable PCH resources required for PCH ports:
3257 * - FDI training & RX/TX
3258 * - update transcoder timings
3259 * - DP transcoding bits
3262 static void ilk_pch_enable(const struct intel_atomic_state *state,
3263 const struct intel_crtc_state *crtc_state)
3265 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3266 struct drm_device *dev = crtc->base.dev;
3267 struct drm_i915_private *dev_priv = to_i915(dev);
3268 enum pipe pipe = crtc->pipe;
3271 assert_pch_transcoder_disabled(dev_priv, pipe);
3273 if (IS_IVYBRIDGE(dev_priv))
3274 ivb_update_fdi_bc_bifurcation(crtc_state);
3276 /* Write the TU size bits before fdi link training, so that error
3277 * detection works. */
3278 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
3279 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3281 /* For PCH output, training FDI link */
3282 dev_priv->display.fdi_link_train(crtc, crtc_state);
3284 /* We need to program the right clock selection before writing the pixel
3285 * mutliplier into the DPLL. */
3286 if (HAS_PCH_CPT(dev_priv)) {
3289 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3290 temp |= TRANS_DPLL_ENABLE(pipe);
3291 sel = TRANS_DPLLB_SEL(pipe);
3292 if (crtc_state->shared_dpll ==
3293 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
3297 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3300 /* XXX: pch pll's can be enabled any time before we enable the PCH
3301 * transcoder, and we actually should do this to not upset any PCH
3302 * transcoder that already use the clock when we share it.
3304 * Note that enable_shared_dpll tries to do the right thing, but
3305 * get_shared_dpll unconditionally resets the pll - we need that to have
3306 * the right LVDS enable sequence. */
3307 intel_enable_shared_dpll(crtc_state);
3309 /* set transcoder timing, panel must allow it */
3310 assert_panel_unlocked(dev_priv, pipe);
3311 ilk_pch_transcoder_set_timings(crtc_state, pipe);
3313 intel_fdi_normal_train(crtc);
3315 /* For PCH DP, enable TRANS_DP_CTL */
3316 if (HAS_PCH_CPT(dev_priv) &&
3317 intel_crtc_has_dp_encoder(crtc_state)) {
3318 const struct drm_display_mode *adjusted_mode =
3319 &crtc_state->hw.adjusted_mode;
3320 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3321 i915_reg_t reg = TRANS_DP_CTL(pipe);
3324 temp = intel_de_read(dev_priv, reg);
3325 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3326 TRANS_DP_SYNC_MASK |
3328 temp |= TRANS_DP_OUTPUT_ENABLE;
3329 temp |= bpc << 9; /* same format but at 11:9 */
3331 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
3332 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3333 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
3334 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3336 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
3337 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
3338 temp |= TRANS_DP_PORT_SEL(port);
3340 intel_de_write(dev_priv, reg, temp);
3343 ilk_enable_pch_transcoder(crtc_state);
3346 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
3348 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3349 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3350 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3352 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
3354 lpt_program_iclkip(crtc_state);
3356 /* Set transcoder timing. */
3357 ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
3359 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3362 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
3365 i915_reg_t dslreg = PIPEDSL(pipe);
3368 temp = intel_de_read(dev_priv, dslreg);
3370 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
3371 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
3372 drm_err(&dev_priv->drm,
3373 "mode set failed: pipe %c stuck\n",
3379 * The hardware phase 0.0 refers to the center of the pixel.
3380 * We want to start from the top/left edge which is phase
3381 * -0.5. That matches how the hardware calculates the scaling
3382 * factors (from top-left of the first pixel to bottom-right
3383 * of the last pixel, as opposed to the pixel centers).
3385 * For 4:2:0 subsampled chroma planes we obviously have to
3386 * adjust that so that the chroma sample position lands in
3389 * Note that for packed YCbCr 4:2:2 formats there is no way to
3390 * control chroma siting. The hardware simply replicates the
3391 * chroma samples for both of the luma samples, and thus we don't
3392 * actually get the expected MPEG2 chroma siting convention :(
3393 * The same behaviour is observed on pre-SKL platforms as well.
3395 * Theory behind the formula (note that we ignore sub-pixel
3396 * source coordinates):
3397 * s = source sample position
3398 * d = destination sample position
3403 * | | 1.5 (initial phase)
3411 * | -0.375 (initial phase)
3418 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
3420 int phase = -0x8000;
3424 phase += (sub - 1) * 0x8000 / sub;
3426 phase += scale / (2 * sub);
3429 * Hardware initial phase limited to [-0.5:1.5].
3430 * Since the max hardware scale factor is 3.0, we
3431 * should never actually excdeed 1.0 here.
3433 WARN_ON(phase < -0x8000 || phase > 0x18000);
3436 phase = 0x10000 + phase;
3438 trip = PS_PHASE_TRIP;
3440 return ((phase >> 2) & PS_PHASE_MASK) | trip;
3443 #define SKL_MIN_SRC_W 8
3444 #define SKL_MAX_SRC_W 4096
3445 #define SKL_MIN_SRC_H 8
3446 #define SKL_MAX_SRC_H 4096
3447 #define SKL_MIN_DST_W 8
3448 #define SKL_MAX_DST_W 4096
3449 #define SKL_MIN_DST_H 8
3450 #define SKL_MAX_DST_H 4096
3451 #define ICL_MAX_SRC_W 5120
3452 #define ICL_MAX_SRC_H 4096
3453 #define ICL_MAX_DST_W 5120
3454 #define ICL_MAX_DST_H 4096
3455 #define SKL_MIN_YUV_420_SRC_W 16
3456 #define SKL_MIN_YUV_420_SRC_H 16
3459 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
3460 unsigned int scaler_user, int *scaler_id,
3461 int src_w, int src_h, int dst_w, int dst_h,
3462 const struct drm_format_info *format,
3463 u64 modifier, bool need_scaler)
3465 struct intel_crtc_scaler_state *scaler_state =
3466 &crtc_state->scaler_state;
3467 struct intel_crtc *intel_crtc =
3468 to_intel_crtc(crtc_state->uapi.crtc);
3469 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3470 const struct drm_display_mode *adjusted_mode =
3471 &crtc_state->hw.adjusted_mode;
3474 * Src coordinates are already rotated by 270 degrees for
3475 * the 90/270 degree plane rotation cases (to match the
3476 * GTT mapping), hence no need to account for rotation here.
3478 if (src_w != dst_w || src_h != dst_h)
3482 * Scaling/fitting not supported in IF-ID mode in GEN9+
3483 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
3484 * Once NV12 is enabled, handle it here while allocating scaler
3487 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
3488 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3489 drm_dbg_kms(&dev_priv->drm,
3490 "Pipe/Plane scaling not supported with IF-ID mode\n");
3495 * if plane is being disabled or scaler is no more required or force detach
3496 * - free scaler binded to this plane/crtc
3497 * - in order to do this, update crtc->scaler_usage
3499 * Here scaler state in crtc_state is set free so that
3500 * scaler can be assigned to other user. Actual register
3501 * update to free the scaler is done in plane/panel-fit programming.
3502 * For this purpose crtc/plane_state->scaler_id isn't reset here.
3504 if (force_detach || !need_scaler) {
3505 if (*scaler_id >= 0) {
3506 scaler_state->scaler_users &= ~(1 << scaler_user);
3507 scaler_state->scalers[*scaler_id].in_use = 0;
3509 drm_dbg_kms(&dev_priv->drm,
3510 "scaler_user index %u.%u: "
3511 "Staged freeing scaler id %d scaler_users = 0x%x\n",
3512 intel_crtc->pipe, scaler_user, *scaler_id,
3513 scaler_state->scaler_users);
3519 if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
3520 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
3521 drm_dbg_kms(&dev_priv->drm,
3522 "Planar YUV: src dimensions not met\n");
3527 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
3528 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
3529 (INTEL_GEN(dev_priv) >= 11 &&
3530 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
3531 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
3532 (INTEL_GEN(dev_priv) < 11 &&
3533 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
3534 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
3535 drm_dbg_kms(&dev_priv->drm,
3536 "scaler_user index %u.%u: src %ux%u dst %ux%u "
3537 "size is out of scaler range\n",
3538 intel_crtc->pipe, scaler_user, src_w, src_h,
3543 /* mark this plane as a scaler user in crtc_state */
3544 scaler_state->scaler_users |= (1 << scaler_user);
3545 drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
3546 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
3547 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
3548 scaler_state->scaler_users);
3553 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
3555 const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
3558 if (crtc_state->pch_pfit.enabled) {
3559 width = drm_rect_width(&crtc_state->pch_pfit.dst);
3560 height = drm_rect_height(&crtc_state->pch_pfit.dst);
3562 width = pipe_mode->crtc_hdisplay;
3563 height = pipe_mode->crtc_vdisplay;
3565 return skl_update_scaler(crtc_state, !crtc_state->hw.active,
3567 &crtc_state->scaler_state.scaler_id,
3568 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
3569 width, height, NULL, 0,
3570 crtc_state->pch_pfit.enabled);
3574 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
3575 * @crtc_state: crtc's scaler state
3576 * @plane_state: atomic plane state to update
3579 * 0 - scaler_usage updated successfully
3580 * error - requested scaling cannot be supported or other error condition
3582 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
3583 struct intel_plane_state *plane_state)
3585 struct intel_plane *intel_plane =
3586 to_intel_plane(plane_state->uapi.plane);
3587 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
3588 struct drm_framebuffer *fb = plane_state->hw.fb;
3590 bool force_detach = !fb || !plane_state->uapi.visible;
3591 bool need_scaler = false;
3593 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
3594 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
3595 fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
3598 ret = skl_update_scaler(crtc_state, force_detach,
3599 drm_plane_index(&intel_plane->base),
3600 &plane_state->scaler_id,
3601 drm_rect_width(&plane_state->uapi.src) >> 16,
3602 drm_rect_height(&plane_state->uapi.src) >> 16,
3603 drm_rect_width(&plane_state->uapi.dst),
3604 drm_rect_height(&plane_state->uapi.dst),
3605 fb ? fb->format : NULL,
3606 fb ? fb->modifier : 0,
3609 if (ret || plane_state->scaler_id < 0)
3612 /* check colorkey */
3613 if (plane_state->ckey.flags) {
3614 drm_dbg_kms(&dev_priv->drm,
3615 "[PLANE:%d:%s] scaling with color key not allowed",
3616 intel_plane->base.base.id,
3617 intel_plane->base.name);
3621 /* Check src format */
3622 switch (fb->format->format) {
3623 case DRM_FORMAT_RGB565:
3624 case DRM_FORMAT_XBGR8888:
3625 case DRM_FORMAT_XRGB8888:
3626 case DRM_FORMAT_ABGR8888:
3627 case DRM_FORMAT_ARGB8888:
3628 case DRM_FORMAT_XRGB2101010:
3629 case DRM_FORMAT_XBGR2101010:
3630 case DRM_FORMAT_ARGB2101010:
3631 case DRM_FORMAT_ABGR2101010:
3632 case DRM_FORMAT_YUYV:
3633 case DRM_FORMAT_YVYU:
3634 case DRM_FORMAT_UYVY:
3635 case DRM_FORMAT_VYUY:
3636 case DRM_FORMAT_NV12:
3637 case DRM_FORMAT_XYUV8888:
3638 case DRM_FORMAT_P010:
3639 case DRM_FORMAT_P012:
3640 case DRM_FORMAT_P016:
3641 case DRM_FORMAT_Y210:
3642 case DRM_FORMAT_Y212:
3643 case DRM_FORMAT_Y216:
3644 case DRM_FORMAT_XVYU2101010:
3645 case DRM_FORMAT_XVYU12_16161616:
3646 case DRM_FORMAT_XVYU16161616:
3648 case DRM_FORMAT_XBGR16161616F:
3649 case DRM_FORMAT_ABGR16161616F:
3650 case DRM_FORMAT_XRGB16161616F:
3651 case DRM_FORMAT_ARGB16161616F:
3652 if (INTEL_GEN(dev_priv) >= 11)
3656 drm_dbg_kms(&dev_priv->drm,
3657 "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
3658 intel_plane->base.base.id, intel_plane->base.name,
3659 fb->base.id, fb->format->format);
3666 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
3668 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3671 for (i = 0; i < crtc->num_scalers; i++)
3672 skl_detach_scaler(crtc, i);
3675 static int cnl_coef_tap(int i)
3680 static u16 cnl_nearest_filter_coef(int t)
3682 return t == 3 ? 0x0800 : 0x3000;
3686 * Theory behind setting nearest-neighbor integer scaling:
3688 * 17 phase of 7 taps requires 119 coefficients in 60 dwords per set.
3689 * The letter represents the filter tap (D is the center tap) and the number
3690 * represents the coefficient set for a phase (0-16).
3692 * +------------+------------------------+------------------------+
3693 * |Index value | Data value coeffient 1 | Data value coeffient 2 |
3694 * +------------+------------------------+------------------------+
3696 * +------------+------------------------+------------------------+
3698 * +------------+------------------------+------------------------+
3700 * +------------+------------------------+------------------------+
3702 * +------------+------------------------+------------------------+
3704 * +------------+------------------------+------------------------+
3705 * | ... | ... | ... |
3706 * +------------+------------------------+------------------------+
3707 * | 38h | B16 | A16 |
3708 * +------------+------------------------+------------------------+
3709 * | 39h | D16 | C16 |
3710 * +------------+------------------------+------------------------+
3711 * | 3Ah | F16 | C16 |
3712 * +------------+------------------------+------------------------+
3713 * | 3Bh | Reserved | G16 |
3714 * +------------+------------------------+------------------------+
3716 * To enable nearest-neighbor scaling: program scaler coefficents with
3717 * the center tap (Dxx) values set to 1 and all other values set to 0 as per
3718 * SCALER_COEFFICIENT_FORMAT
3722 static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
3723 enum pipe pipe, int id, int set)
3727 intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
3728 PS_COEE_INDEX_AUTO_INC);
3730 for (i = 0; i < 17 * 7; i += 2) {
3734 t = cnl_coef_tap(i);
3735 tmp = cnl_nearest_filter_coef(t);
3737 t = cnl_coef_tap(i + 1);
3738 tmp |= cnl_nearest_filter_coef(t) << 16;
3740 intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
3744 intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
3747 u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
3749 if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
3750 return (PS_FILTER_PROGRAMMED |
3751 PS_Y_VERT_FILTER_SELECT(set) |
3752 PS_Y_HORZ_FILTER_SELECT(set) |
3753 PS_UV_VERT_FILTER_SELECT(set) |
3754 PS_UV_HORZ_FILTER_SELECT(set));
3757 return PS_FILTER_MEDIUM;
3760 void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
3761 int id, int set, enum drm_scaling_filter filter)
3764 case DRM_SCALING_FILTER_DEFAULT:
3766 case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
3767 cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
3770 MISSING_CASE(filter);
3774 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
3776 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3777 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3778 const struct intel_crtc_scaler_state *scaler_state =
3779 &crtc_state->scaler_state;
3780 struct drm_rect src = {
3781 .x2 = crtc_state->pipe_src_w << 16,
3782 .y2 = crtc_state->pipe_src_h << 16,
3784 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
3785 u16 uv_rgb_hphase, uv_rgb_vphase;
3786 enum pipe pipe = crtc->pipe;
3787 int width = drm_rect_width(dst);
3788 int height = drm_rect_height(dst);
3792 unsigned long irqflags;
3796 if (!crtc_state->pch_pfit.enabled)
3799 if (drm_WARN_ON(&dev_priv->drm,
3800 crtc_state->scaler_state.scaler_id < 0))
3803 hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
3804 vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
3806 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
3807 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
3809 id = scaler_state->scaler_id;
3811 ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
3812 ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
3814 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3816 skl_scaler_setup_filter(dev_priv, pipe, id, 0,
3817 crtc_state->hw.scaling_filter);
3819 intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
3821 intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
3822 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
3823 intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
3824 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
3825 intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
3827 intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
3828 width << 16 | height);
3830 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3833 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
3835 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3836 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3837 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
3838 enum pipe pipe = crtc->pipe;
3839 int width = drm_rect_width(dst);
3840 int height = drm_rect_height(dst);
3844 if (!crtc_state->pch_pfit.enabled)
3847 /* Force use of hard-coded filter coefficients
3848 * as some pre-programmed values are broken,
3851 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3852 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3853 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
3855 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3857 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
3858 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
3861 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
3863 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3864 struct drm_device *dev = crtc->base.dev;
3865 struct drm_i915_private *dev_priv = to_i915(dev);
3867 if (!crtc_state->ips_enabled)
3871 * We can only enable IPS after we enable a plane and wait for a vblank
3872 * This function is called from post_plane_update, which is run after
3875 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
3877 if (IS_BROADWELL(dev_priv)) {
3878 drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
3879 IPS_ENABLE | IPS_PCODE_CONTROL));
3880 /* Quoting Art Runyan: "its not safe to expect any particular
3881 * value in IPS_CTL bit 31 after enabling IPS through the
3882 * mailbox." Moreover, the mailbox may return a bogus state,
3883 * so we need to just enable it and continue on.
3886 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
3887 /* The bit only becomes 1 in the next vblank, so this wait here
3888 * is essentially intel_wait_for_vblank. If we don't have this
3889 * and don't wait for vblanks until the end of crtc_enable, then
3890 * the HW state readout code will complain that the expected
3891 * IPS_CTL value is not the one we read. */
3892 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
3893 drm_err(&dev_priv->drm,
3894 "Timed out waiting for IPS enable\n");
3898 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
3900 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3901 struct drm_device *dev = crtc->base.dev;
3902 struct drm_i915_private *dev_priv = to_i915(dev);
3904 if (!crtc_state->ips_enabled)
3907 if (IS_BROADWELL(dev_priv)) {
3909 sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3911 * Wait for PCODE to finish disabling IPS. The BSpec specified
3912 * 42ms timeout value leads to occasional timeouts so use 100ms
3915 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
3916 drm_err(&dev_priv->drm,
3917 "Timed out waiting for IPS disable\n");
3919 intel_de_write(dev_priv, IPS_CTL, 0);
3920 intel_de_posting_read(dev_priv, IPS_CTL);
3923 /* We need to wait for a vblank before we can disable the plane. */
3924 intel_wait_for_vblank(dev_priv, crtc->pipe);
3927 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
3929 if (intel_crtc->overlay)
3930 (void) intel_overlay_switch_off(intel_crtc->overlay);
3932 /* Let userspace switch the overlay on again. In most cases userspace
3933 * has to recompute where to put it anyway.
3937 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
3938 const struct intel_crtc_state *new_crtc_state)
3940 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3941 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3943 if (!old_crtc_state->ips_enabled)
3946 if (intel_crtc_needs_modeset(new_crtc_state))
3950 * Workaround : Do not read or write the pipe palette/gamma data while
3951 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3953 * Disable IPS before we program the LUT.
3955 if (IS_HASWELL(dev_priv) &&
3956 (new_crtc_state->uapi.color_mgmt_changed ||
3957 new_crtc_state->update_pipe) &&
3958 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3961 return !new_crtc_state->ips_enabled;
3964 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
3965 const struct intel_crtc_state *new_crtc_state)
3967 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3968 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3970 if (!new_crtc_state->ips_enabled)
3973 if (intel_crtc_needs_modeset(new_crtc_state))
3977 * Workaround : Do not read or write the pipe palette/gamma data while
3978 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3980 * Re-enable IPS after the LUT has been programmed.
3982 if (IS_HASWELL(dev_priv) &&
3983 (new_crtc_state->uapi.color_mgmt_changed ||
3984 new_crtc_state->update_pipe) &&
3985 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3989 * We can't read out IPS on broadwell, assume the worst and
3990 * forcibly enable IPS on the first fastset.
3992 if (new_crtc_state->update_pipe && old_crtc_state->inherited)
3995 return !old_crtc_state->ips_enabled;
3998 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
4000 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4002 if (!crtc_state->nv12_planes)
4005 /* WA Display #0827: Gen9:all */
4006 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
4012 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
4014 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4016 /* Wa_2006604312:icl,ehl */
4017 if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
4023 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
4024 const struct intel_crtc_state *new_crtc_state)
4026 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
4027 new_crtc_state->active_planes;
4030 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
4031 const struct intel_crtc_state *new_crtc_state)
4033 return old_crtc_state->active_planes &&
4034 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
4037 static void intel_post_plane_update(struct intel_atomic_state *state,
4038 struct intel_crtc *crtc)
4040 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4041 const struct intel_crtc_state *old_crtc_state =
4042 intel_atomic_get_old_crtc_state(state, crtc);
4043 const struct intel_crtc_state *new_crtc_state =
4044 intel_atomic_get_new_crtc_state(state, crtc);
4045 enum pipe pipe = crtc->pipe;
4047 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
4049 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
4050 intel_update_watermarks(crtc);
4052 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
4053 hsw_enable_ips(new_crtc_state);
4055 intel_fbc_post_update(state, crtc);
4057 if (needs_nv12_wa(old_crtc_state) &&
4058 !needs_nv12_wa(new_crtc_state))
4059 skl_wa_827(dev_priv, pipe, false);
4061 if (needs_scalerclk_wa(old_crtc_state) &&
4062 !needs_scalerclk_wa(new_crtc_state))
4063 icl_wa_scalerclkgating(dev_priv, pipe, false);
4066 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
4067 struct intel_crtc *crtc)
4069 const struct intel_crtc_state *crtc_state =
4070 intel_atomic_get_new_crtc_state(state, crtc);
4071 u8 update_planes = crtc_state->update_planes;
4072 const struct intel_plane_state *plane_state;
4073 struct intel_plane *plane;
4076 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4077 if (plane->enable_flip_done &&
4078 plane->pipe == crtc->pipe &&
4079 update_planes & BIT(plane->id))
4080 plane->enable_flip_done(plane);
4084 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
4085 struct intel_crtc *crtc)
4087 const struct intel_crtc_state *crtc_state =
4088 intel_atomic_get_new_crtc_state(state, crtc);
4089 u8 update_planes = crtc_state->update_planes;
4090 const struct intel_plane_state *plane_state;
4091 struct intel_plane *plane;
4094 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4095 if (plane->disable_flip_done &&
4096 plane->pipe == crtc->pipe &&
4097 update_planes & BIT(plane->id))
4098 plane->disable_flip_done(plane);
4102 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
4103 struct intel_crtc *crtc)
4105 struct drm_i915_private *i915 = to_i915(state->base.dev);
4106 const struct intel_crtc_state *old_crtc_state =
4107 intel_atomic_get_old_crtc_state(state, crtc);
4108 const struct intel_crtc_state *new_crtc_state =
4109 intel_atomic_get_new_crtc_state(state, crtc);
4110 u8 update_planes = new_crtc_state->update_planes;
4111 const struct intel_plane_state *old_plane_state;
4112 struct intel_plane *plane;
4113 bool need_vbl_wait = false;
4116 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
4117 if (plane->need_async_flip_disable_wa &&
4118 plane->pipe == crtc->pipe &&
4119 update_planes & BIT(plane->id)) {
4121 * Apart from the async flip bit we want to
4122 * preserve the old state for the plane.
4124 plane->async_flip(plane, old_crtc_state,
4125 old_plane_state, false);
4126 need_vbl_wait = true;
4131 intel_wait_for_vblank(i915, crtc->pipe);
4134 static void intel_pre_plane_update(struct intel_atomic_state *state,
4135 struct intel_crtc *crtc)
4137 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4138 const struct intel_crtc_state *old_crtc_state =
4139 intel_atomic_get_old_crtc_state(state, crtc);
4140 const struct intel_crtc_state *new_crtc_state =
4141 intel_atomic_get_new_crtc_state(state, crtc);
4142 enum pipe pipe = crtc->pipe;
4144 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
4145 hsw_disable_ips(old_crtc_state);
4147 if (intel_fbc_pre_update(state, crtc))
4148 intel_wait_for_vblank(dev_priv, pipe);
4150 /* Display WA 827 */
4151 if (!needs_nv12_wa(old_crtc_state) &&
4152 needs_nv12_wa(new_crtc_state))
4153 skl_wa_827(dev_priv, pipe, true);
4155 /* Wa_2006604312:icl,ehl */
4156 if (!needs_scalerclk_wa(old_crtc_state) &&
4157 needs_scalerclk_wa(new_crtc_state))
4158 icl_wa_scalerclkgating(dev_priv, pipe, true);
4161 * Vblank time updates from the shadow to live plane control register
4162 * are blocked if the memory self-refresh mode is active at that
4163 * moment. So to make sure the plane gets truly disabled, disable
4164 * first the self-refresh mode. The self-refresh enable bit in turn
4165 * will be checked/applied by the HW only at the next frame start
4166 * event which is after the vblank start event, so we need to have a
4167 * wait-for-vblank between disabling the plane and the pipe.
4169 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
4170 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
4171 intel_wait_for_vblank(dev_priv, pipe);
4174 * IVB workaround: must disable low power watermarks for at least
4175 * one frame before enabling scaling. LP watermarks can be re-enabled
4176 * when scaling is disabled.
4178 * WaCxSRDisabledForSpriteScaling:ivb
4180 if (old_crtc_state->hw.active &&
4181 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
4182 intel_wait_for_vblank(dev_priv, pipe);
4185 * If we're doing a modeset we don't need to do any
4186 * pre-vblank watermark programming here.
4188 if (!intel_crtc_needs_modeset(new_crtc_state)) {
4190 * For platforms that support atomic watermarks, program the
4191 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
4192 * will be the intermediate values that are safe for both pre- and
4193 * post- vblank; when vblank happens, the 'active' values will be set
4194 * to the final 'target' values and we'll do this again to get the
4195 * optimal watermarks. For gen9+ platforms, the values we program here
4196 * will be the final target values which will get automatically latched
4197 * at vblank time; no further programming will be necessary.
4199 * If a platform hasn't been transitioned to atomic watermarks yet,
4200 * we'll continue to update watermarks the old way, if flags tell
4203 if (dev_priv->display.initial_watermarks)
4204 dev_priv->display.initial_watermarks(state, crtc);
4205 else if (new_crtc_state->update_wm_pre)
4206 intel_update_watermarks(crtc);
4210 * Gen2 reports pipe underruns whenever all planes are disabled.
4211 * So disable underrun reporting before all the planes get disabled.
4213 * We do this after .initial_watermarks() so that we have a
4214 * chance of catching underruns with the intermediate watermarks
4215 * vs. the old plane configuration.
4217 if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
4218 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4221 * WA for platforms where async address update enable bit
4222 * is double buffered and only latched at start of vblank.
4224 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
4225 intel_crtc_async_flip_disable_wa(state, crtc);
4228 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
4229 struct intel_crtc *crtc)
4231 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4232 const struct intel_crtc_state *new_crtc_state =
4233 intel_atomic_get_new_crtc_state(state, crtc);
4234 unsigned int update_mask = new_crtc_state->update_planes;
4235 const struct intel_plane_state *old_plane_state;
4236 struct intel_plane *plane;
4237 unsigned fb_bits = 0;
4240 intel_crtc_dpms_overlay_disable(crtc);
4242 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
4243 if (crtc->pipe != plane->pipe ||
4244 !(update_mask & BIT(plane->id)))
4247 intel_disable_plane(plane, new_crtc_state);
4249 if (old_plane_state->uapi.visible)
4250 fb_bits |= plane->frontbuffer_bit;
4253 intel_frontbuffer_flip(dev_priv, fb_bits);
4257 * intel_connector_primary_encoder - get the primary encoder for a connector
4258 * @connector: connector for which to return the encoder
4260 * Returns the primary encoder for a connector. There is a 1:1 mapping from
4261 * all connectors to their encoder, except for DP-MST connectors which have
4262 * both a virtual and a primary encoder. These DP-MST primary encoders can be
4263 * pointed to by as many DP-MST connectors as there are pipes.
4265 static struct intel_encoder *
4266 intel_connector_primary_encoder(struct intel_connector *connector)
4268 struct intel_encoder *encoder;
4270 if (connector->mst_port)
4271 return &dp_to_dig_port(connector->mst_port)->base;
4273 encoder = intel_attached_encoder(connector);
4274 drm_WARN_ON(connector->base.dev, !encoder);
4279 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
4281 struct drm_connector_state *new_conn_state;
4282 struct drm_connector *connector;
4285 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
4287 struct intel_connector *intel_connector;
4288 struct intel_encoder *encoder;
4289 struct intel_crtc *crtc;
4291 if (!intel_connector_needs_modeset(state, connector))
4294 intel_connector = to_intel_connector(connector);
4295 encoder = intel_connector_primary_encoder(intel_connector);
4296 if (!encoder->update_prepare)
4299 crtc = new_conn_state->crtc ?
4300 to_intel_crtc(new_conn_state->crtc) : NULL;
4301 encoder->update_prepare(state, encoder, crtc);
4305 static void intel_encoders_update_complete(struct intel_atomic_state *state)
4307 struct drm_connector_state *new_conn_state;
4308 struct drm_connector *connector;
4311 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
4313 struct intel_connector *intel_connector;
4314 struct intel_encoder *encoder;
4315 struct intel_crtc *crtc;
4317 if (!intel_connector_needs_modeset(state, connector))
4320 intel_connector = to_intel_connector(connector);
4321 encoder = intel_connector_primary_encoder(intel_connector);
4322 if (!encoder->update_complete)
4325 crtc = new_conn_state->crtc ?
4326 to_intel_crtc(new_conn_state->crtc) : NULL;
4327 encoder->update_complete(state, encoder, crtc);
4331 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
4332 struct intel_crtc *crtc)
4334 const struct intel_crtc_state *crtc_state =
4335 intel_atomic_get_new_crtc_state(state, crtc);
4336 const struct drm_connector_state *conn_state;
4337 struct drm_connector *conn;
4340 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4341 struct intel_encoder *encoder =
4342 to_intel_encoder(conn_state->best_encoder);
4344 if (conn_state->crtc != &crtc->base)
4347 if (encoder->pre_pll_enable)
4348 encoder->pre_pll_enable(state, encoder,
4349 crtc_state, conn_state);
4353 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
4354 struct intel_crtc *crtc)
4356 const struct intel_crtc_state *crtc_state =
4357 intel_atomic_get_new_crtc_state(state, crtc);
4358 const struct drm_connector_state *conn_state;
4359 struct drm_connector *conn;
4362 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4363 struct intel_encoder *encoder =
4364 to_intel_encoder(conn_state->best_encoder);
4366 if (conn_state->crtc != &crtc->base)
4369 if (encoder->pre_enable)
4370 encoder->pre_enable(state, encoder,
4371 crtc_state, conn_state);
4375 static void intel_encoders_enable(struct intel_atomic_state *state,
4376 struct intel_crtc *crtc)
4378 const struct intel_crtc_state *crtc_state =
4379 intel_atomic_get_new_crtc_state(state, crtc);
4380 const struct drm_connector_state *conn_state;
4381 struct drm_connector *conn;
4384 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4385 struct intel_encoder *encoder =
4386 to_intel_encoder(conn_state->best_encoder);
4388 if (conn_state->crtc != &crtc->base)
4391 if (encoder->enable)
4392 encoder->enable(state, encoder,
4393 crtc_state, conn_state);
4394 intel_opregion_notify_encoder(encoder, true);
4398 static void intel_encoders_disable(struct intel_atomic_state *state,
4399 struct intel_crtc *crtc)
4401 const struct intel_crtc_state *old_crtc_state =
4402 intel_atomic_get_old_crtc_state(state, crtc);
4403 const struct drm_connector_state *old_conn_state;
4404 struct drm_connector *conn;
4407 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
4408 struct intel_encoder *encoder =
4409 to_intel_encoder(old_conn_state->best_encoder);
4411 if (old_conn_state->crtc != &crtc->base)
4414 intel_opregion_notify_encoder(encoder, false);
4415 if (encoder->disable)
4416 encoder->disable(state, encoder,
4417 old_crtc_state, old_conn_state);
4421 static void intel_encoders_post_disable(struct intel_atomic_state *state,
4422 struct intel_crtc *crtc)
4424 const struct intel_crtc_state *old_crtc_state =
4425 intel_atomic_get_old_crtc_state(state, crtc);
4426 const struct drm_connector_state *old_conn_state;
4427 struct drm_connector *conn;
4430 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
4431 struct intel_encoder *encoder =
4432 to_intel_encoder(old_conn_state->best_encoder);
4434 if (old_conn_state->crtc != &crtc->base)
4437 if (encoder->post_disable)
4438 encoder->post_disable(state, encoder,
4439 old_crtc_state, old_conn_state);
4443 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
4444 struct intel_crtc *crtc)
4446 const struct intel_crtc_state *old_crtc_state =
4447 intel_atomic_get_old_crtc_state(state, crtc);
4448 const struct drm_connector_state *old_conn_state;
4449 struct drm_connector *conn;
4452 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
4453 struct intel_encoder *encoder =
4454 to_intel_encoder(old_conn_state->best_encoder);
4456 if (old_conn_state->crtc != &crtc->base)
4459 if (encoder->post_pll_disable)
4460 encoder->post_pll_disable(state, encoder,
4461 old_crtc_state, old_conn_state);
4465 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
4466 struct intel_crtc *crtc)
4468 const struct intel_crtc_state *crtc_state =
4469 intel_atomic_get_new_crtc_state(state, crtc);
4470 const struct drm_connector_state *conn_state;
4471 struct drm_connector *conn;
4474 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4475 struct intel_encoder *encoder =
4476 to_intel_encoder(conn_state->best_encoder);
4478 if (conn_state->crtc != &crtc->base)
4481 if (encoder->update_pipe)
4482 encoder->update_pipe(state, encoder,
4483 crtc_state, conn_state);
4487 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
4489 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4490 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4492 plane->disable_plane(plane, crtc_state);
4495 static void ilk_crtc_enable(struct intel_atomic_state *state,
4496 struct intel_crtc *crtc)
4498 const struct intel_crtc_state *new_crtc_state =
4499 intel_atomic_get_new_crtc_state(state, crtc);
4500 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4501 enum pipe pipe = crtc->pipe;
4503 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
4507 * Sometimes spurious CPU pipe underruns happen during FDI
4508 * training, at least with VGA+HDMI cloning. Suppress them.
4510 * On ILK we get an occasional spurious CPU pipe underruns
4511 * between eDP port A enable and vdd enable. Also PCH port
4512 * enable seems to result in the occasional CPU pipe underrun.
4514 * Spurious PCH underruns also occur during PCH enabling.
4516 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4517 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4519 if (new_crtc_state->has_pch_encoder)
4520 intel_prepare_shared_dpll(new_crtc_state);
4522 if (intel_crtc_has_dp_encoder(new_crtc_state))
4523 intel_dp_set_m_n(new_crtc_state, M1_N1);
4525 intel_set_transcoder_timings(new_crtc_state);
4526 intel_set_pipe_src_size(new_crtc_state);
4528 if (new_crtc_state->has_pch_encoder)
4529 intel_cpu_transcoder_set_m_n(new_crtc_state,
4530 &new_crtc_state->fdi_m_n, NULL);
4532 ilk_set_pipeconf(new_crtc_state);
4534 crtc->active = true;
4536 intel_encoders_pre_enable(state, crtc);
4538 if (new_crtc_state->has_pch_encoder) {
4539 /* Note: FDI PLL enabling _must_ be done before we enable the
4540 * cpu pipes, hence this is separate from all the other fdi/pch
4542 ilk_fdi_pll_enable(new_crtc_state);
4544 assert_fdi_tx_disabled(dev_priv, pipe);
4545 assert_fdi_rx_disabled(dev_priv, pipe);
4548 ilk_pfit_enable(new_crtc_state);
4551 * On ILK+ LUT must be loaded before the pipe is running but with
4554 intel_color_load_luts(new_crtc_state);
4555 intel_color_commit(new_crtc_state);
4556 /* update DSPCNTR to configure gamma for pipe bottom color */
4557 intel_disable_primary_plane(new_crtc_state);
4559 if (dev_priv->display.initial_watermarks)
4560 dev_priv->display.initial_watermarks(state, crtc);
4561 intel_enable_pipe(new_crtc_state);
4563 if (new_crtc_state->has_pch_encoder)
4564 ilk_pch_enable(state, new_crtc_state);
4566 intel_crtc_vblank_on(new_crtc_state);
4568 intel_encoders_enable(state, crtc);
4570 if (HAS_PCH_CPT(dev_priv))
4571 cpt_verify_modeset(dev_priv, pipe);
4574 * Must wait for vblank to avoid spurious PCH FIFO underruns.
4575 * And a second vblank wait is needed at least on ILK with
4576 * some interlaced HDMI modes. Let's do the double wait always
4577 * in case there are more corner cases we don't know about.
4579 if (new_crtc_state->has_pch_encoder) {
4580 intel_wait_for_vblank(dev_priv, pipe);
4581 intel_wait_for_vblank(dev_priv, pipe);
4583 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4584 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4587 /* IPS only exists on ULT machines and is tied to pipe A. */
4588 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4590 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
4593 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
4594 enum pipe pipe, bool apply)
4596 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
4597 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
4604 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
4607 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
4609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4610 enum pipe pipe = crtc->pipe;
4613 val = MBUS_DBOX_A_CREDIT(2);
4615 if (INTEL_GEN(dev_priv) >= 12) {
4616 val |= MBUS_DBOX_BW_CREDIT(2);
4617 val |= MBUS_DBOX_B_CREDIT(12);
4619 val |= MBUS_DBOX_BW_CREDIT(1);
4620 val |= MBUS_DBOX_B_CREDIT(8);
4623 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
4626 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
4628 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4629 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4631 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
4632 HSW_LINETIME(crtc_state->linetime) |
4633 HSW_IPS_LINETIME(crtc_state->ips_linetime));
4636 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
4638 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4639 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4640 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
4643 val = intel_de_read(dev_priv, reg);
4644 val &= ~HSW_FRAME_START_DELAY_MASK;
4645 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4646 intel_de_write(dev_priv, reg, val);
4649 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
4650 const struct intel_crtc_state *crtc_state)
4652 struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
4653 struct intel_crtc_state *master_crtc_state;
4654 struct drm_connector_state *conn_state;
4655 struct drm_connector *conn;
4656 struct intel_encoder *encoder = NULL;
4659 if (crtc_state->bigjoiner_slave)
4660 master = crtc_state->bigjoiner_linked_crtc;
4662 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
4664 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4665 if (conn_state->crtc != &master->base)
4668 encoder = to_intel_encoder(conn_state->best_encoder);
4672 if (!crtc_state->bigjoiner_slave) {
4673 /* need to enable VDSC, which we skipped in pre-enable */
4674 intel_dsc_enable(encoder, crtc_state);
4677 * Enable sequence steps 1-7 on bigjoiner master
4679 intel_encoders_pre_pll_enable(state, master);
4680 intel_enable_shared_dpll(master_crtc_state);
4681 intel_encoders_pre_enable(state, master);
4683 /* and DSC on slave */
4684 intel_dsc_enable(NULL, crtc_state);
4688 static void hsw_crtc_enable(struct intel_atomic_state *state,
4689 struct intel_crtc *crtc)
4691 const struct intel_crtc_state *new_crtc_state =
4692 intel_atomic_get_new_crtc_state(state, crtc);
4693 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4694 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
4695 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
4696 bool psl_clkgate_wa;
4698 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
4701 if (!new_crtc_state->bigjoiner) {
4702 intel_encoders_pre_pll_enable(state, crtc);
4704 if (new_crtc_state->shared_dpll)
4705 intel_enable_shared_dpll(new_crtc_state);
4707 intel_encoders_pre_enable(state, crtc);
4709 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
4712 intel_set_pipe_src_size(new_crtc_state);
4713 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
4714 bdw_set_pipemisc(new_crtc_state);
4716 if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
4717 intel_set_transcoder_timings(new_crtc_state);
4719 if (cpu_transcoder != TRANSCODER_EDP)
4720 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
4721 new_crtc_state->pixel_multiplier - 1);
4723 if (new_crtc_state->has_pch_encoder)
4724 intel_cpu_transcoder_set_m_n(new_crtc_state,
4725 &new_crtc_state->fdi_m_n, NULL);
4727 hsw_set_frame_start_delay(new_crtc_state);
4730 if (!transcoder_is_dsi(cpu_transcoder))
4731 hsw_set_pipeconf(new_crtc_state);
4733 crtc->active = true;
4735 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
4736 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
4737 new_crtc_state->pch_pfit.enabled;
4739 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
4741 if (INTEL_GEN(dev_priv) >= 9)
4742 skl_pfit_enable(new_crtc_state);
4744 ilk_pfit_enable(new_crtc_state);
4747 * On ILK+ LUT must be loaded before the pipe is running but with
4750 intel_color_load_luts(new_crtc_state);
4751 intel_color_commit(new_crtc_state);
4752 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
4753 if (INTEL_GEN(dev_priv) < 9)
4754 intel_disable_primary_plane(new_crtc_state);
4756 hsw_set_linetime_wm(new_crtc_state);
4758 if (INTEL_GEN(dev_priv) >= 11)
4759 icl_set_pipe_chicken(crtc);
4761 if (dev_priv->display.initial_watermarks)
4762 dev_priv->display.initial_watermarks(state, crtc);
4764 if (INTEL_GEN(dev_priv) >= 11)
4765 icl_pipe_mbus_enable(crtc);
4767 if (new_crtc_state->bigjoiner_slave) {
4768 trace_intel_pipe_enable(crtc);
4769 intel_crtc_vblank_on(new_crtc_state);
4772 intel_encoders_enable(state, crtc);
4774 if (psl_clkgate_wa) {
4775 intel_wait_for_vblank(dev_priv, pipe);
4776 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
4779 /* If we change the relative order between pipe/planes enabling, we need
4780 * to change the workaround. */
4781 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
4782 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
4783 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
4784 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
4788 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4790 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4791 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4792 enum pipe pipe = crtc->pipe;
4794 /* To avoid upsetting the power well on haswell only disable the pfit if
4795 * it's in use. The hw state code will make sure we get this right. */
4796 if (!old_crtc_state->pch_pfit.enabled)
4799 intel_de_write(dev_priv, PF_CTL(pipe), 0);
4800 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
4801 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
4804 static void ilk_crtc_disable(struct intel_atomic_state *state,
4805 struct intel_crtc *crtc)
4807 const struct intel_crtc_state *old_crtc_state =
4808 intel_atomic_get_old_crtc_state(state, crtc);
4809 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4810 enum pipe pipe = crtc->pipe;
4813 * Sometimes spurious CPU pipe underruns happen when the
4814 * pipe is already disabled, but FDI RX/TX is still enabled.
4815 * Happens at least with VGA+HDMI cloning. Suppress them.
4817 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4818 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4820 intel_encoders_disable(state, crtc);
4822 intel_crtc_vblank_off(old_crtc_state);
4824 intel_disable_pipe(old_crtc_state);
4826 ilk_pfit_disable(old_crtc_state);
4828 if (old_crtc_state->has_pch_encoder)
4829 ilk_fdi_disable(crtc);
4831 intel_encoders_post_disable(state, crtc);
4833 if (old_crtc_state->has_pch_encoder) {
4834 ilk_disable_pch_transcoder(dev_priv, pipe);
4836 if (HAS_PCH_CPT(dev_priv)) {
4840 /* disable TRANS_DP_CTL */
4841 reg = TRANS_DP_CTL(pipe);
4842 temp = intel_de_read(dev_priv, reg);
4843 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4844 TRANS_DP_PORT_SEL_MASK);
4845 temp |= TRANS_DP_PORT_SEL_NONE;
4846 intel_de_write(dev_priv, reg, temp);
4848 /* disable DPLL_SEL */
4849 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
4850 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4851 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
4854 ilk_fdi_pll_disable(crtc);
4857 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4858 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4861 static void hsw_crtc_disable(struct intel_atomic_state *state,
4862 struct intel_crtc *crtc)
4865 * FIXME collapse everything to one hook.
4866 * Need care with mst->ddi interactions.
4868 intel_encoders_disable(state, crtc);
4869 intel_encoders_post_disable(state, crtc);
4872 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
4874 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4875 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4877 if (!crtc_state->gmch_pfit.control)
4881 * The panel fitter should only be adjusted whilst the pipe is disabled,
4882 * according to register description and PRM.
4884 drm_WARN_ON(&dev_priv->drm,
4885 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
4886 assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
4888 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
4889 crtc_state->gmch_pfit.pgm_ratios);
4890 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
4892 /* Border color in case we don't scale up to the full screen. Black by
4893 * default, change to something else for debugging. */
4894 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
4897 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
4899 if (phy == PHY_NONE)
4901 else if (IS_ALDERLAKE_S(dev_priv))
4902 return phy <= PHY_E;
4903 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
4904 return phy <= PHY_D;
4905 else if (IS_JSL_EHL(dev_priv))
4906 return phy <= PHY_C;
4907 else if (INTEL_GEN(dev_priv) >= 11)
4908 return phy <= PHY_B;
4913 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
4915 if (IS_TIGERLAKE(dev_priv))
4916 return phy >= PHY_D && phy <= PHY_I;
4917 else if (IS_ICELAKE(dev_priv))
4918 return phy >= PHY_C && phy <= PHY_F;
4923 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
4925 if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
4926 return PHY_B + port - PORT_TC1;
4927 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
4928 return PHY_C + port - PORT_TC1;
4929 else if (IS_JSL_EHL(i915) && port == PORT_D)
4932 return PHY_A + port - PORT_A;
4935 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
4937 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
4938 return TC_PORT_NONE;
4940 if (INTEL_GEN(dev_priv) >= 12)
4941 return TC_PORT_1 + port - PORT_TC1;
4943 return TC_PORT_1 + port - PORT_C;
4946 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
4950 return POWER_DOMAIN_PORT_DDI_A_LANES;
4952 return POWER_DOMAIN_PORT_DDI_B_LANES;
4954 return POWER_DOMAIN_PORT_DDI_C_LANES;
4956 return POWER_DOMAIN_PORT_DDI_D_LANES;
4958 return POWER_DOMAIN_PORT_DDI_E_LANES;
4960 return POWER_DOMAIN_PORT_DDI_F_LANES;
4962 return POWER_DOMAIN_PORT_DDI_G_LANES;
4964 return POWER_DOMAIN_PORT_DDI_H_LANES;
4966 return POWER_DOMAIN_PORT_DDI_I_LANES;
4969 return POWER_DOMAIN_PORT_OTHER;
4973 enum intel_display_power_domain
4974 intel_aux_power_domain(struct intel_digital_port *dig_port)
4976 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
4977 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
4979 if (intel_phy_is_tc(dev_priv, phy) &&
4980 dig_port->tc_mode == TC_PORT_TBT_ALT) {
4981 switch (dig_port->aux_ch) {
4983 return POWER_DOMAIN_AUX_C_TBT;
4985 return POWER_DOMAIN_AUX_D_TBT;
4987 return POWER_DOMAIN_AUX_E_TBT;
4989 return POWER_DOMAIN_AUX_F_TBT;
4991 return POWER_DOMAIN_AUX_G_TBT;
4993 return POWER_DOMAIN_AUX_H_TBT;
4995 return POWER_DOMAIN_AUX_I_TBT;
4997 MISSING_CASE(dig_port->aux_ch);
4998 return POWER_DOMAIN_AUX_C_TBT;
5002 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
5006 * Converts aux_ch to power_domain without caring about TBT ports for that use
5007 * intel_aux_power_domain()
5009 enum intel_display_power_domain
5010 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
5014 return POWER_DOMAIN_AUX_A;
5016 return POWER_DOMAIN_AUX_B;
5018 return POWER_DOMAIN_AUX_C;
5020 return POWER_DOMAIN_AUX_D;
5022 return POWER_DOMAIN_AUX_E;
5024 return POWER_DOMAIN_AUX_F;
5026 return POWER_DOMAIN_AUX_G;
5028 return POWER_DOMAIN_AUX_H;
5030 return POWER_DOMAIN_AUX_I;
5032 MISSING_CASE(aux_ch);
5033 return POWER_DOMAIN_AUX_A;
5037 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
5039 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5040 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5041 struct drm_encoder *encoder;
5042 enum pipe pipe = crtc->pipe;
5044 enum transcoder transcoder = crtc_state->cpu_transcoder;
5046 if (!crtc_state->hw.active)
5049 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
5050 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
5051 if (crtc_state->pch_pfit.enabled ||
5052 crtc_state->pch_pfit.force_thru)
5053 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5055 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
5056 crtc_state->uapi.encoder_mask) {
5057 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5059 mask |= BIT_ULL(intel_encoder->power_domain);
5062 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
5063 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
5065 if (crtc_state->shared_dpll)
5066 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
5068 if (crtc_state->dsc.compression_enable)
5069 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
5075 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
5077 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5078 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5079 enum intel_display_power_domain domain;
5080 u64 domains, new_domains, old_domains;
5082 domains = get_crtc_power_domains(crtc_state);
5084 new_domains = domains & ~crtc->enabled_power_domains.mask;
5085 old_domains = crtc->enabled_power_domains.mask & ~domains;
5087 for_each_power_domain(domain, new_domains)
5088 intel_display_power_get_in_set(dev_priv,
5089 &crtc->enabled_power_domains,
5095 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
5098 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
5099 &crtc->enabled_power_domains,
5103 static void valleyview_crtc_enable(struct intel_atomic_state *state,
5104 struct intel_crtc *crtc)
5106 const struct intel_crtc_state *new_crtc_state =
5107 intel_atomic_get_new_crtc_state(state, crtc);
5108 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5109 enum pipe pipe = crtc->pipe;
5111 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
5114 if (intel_crtc_has_dp_encoder(new_crtc_state))
5115 intel_dp_set_m_n(new_crtc_state, M1_N1);
5117 intel_set_transcoder_timings(new_crtc_state);
5118 intel_set_pipe_src_size(new_crtc_state);
5120 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
5121 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
5122 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
5125 i9xx_set_pipeconf(new_crtc_state);
5127 crtc->active = true;
5129 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5131 intel_encoders_pre_pll_enable(state, crtc);
5133 if (IS_CHERRYVIEW(dev_priv)) {
5134 chv_prepare_pll(crtc, new_crtc_state);
5135 chv_enable_pll(crtc, new_crtc_state);
5137 vlv_prepare_pll(crtc, new_crtc_state);
5138 vlv_enable_pll(crtc, new_crtc_state);
5141 intel_encoders_pre_enable(state, crtc);
5143 i9xx_pfit_enable(new_crtc_state);
5145 intel_color_load_luts(new_crtc_state);
5146 intel_color_commit(new_crtc_state);
5147 /* update DSPCNTR to configure gamma for pipe bottom color */
5148 intel_disable_primary_plane(new_crtc_state);
5150 dev_priv->display.initial_watermarks(state, crtc);
5151 intel_enable_pipe(new_crtc_state);
5153 intel_crtc_vblank_on(new_crtc_state);
5155 intel_encoders_enable(state, crtc);
5158 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
5160 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5161 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5163 intel_de_write(dev_priv, FP0(crtc->pipe),
5164 crtc_state->dpll_hw_state.fp0);
5165 intel_de_write(dev_priv, FP1(crtc->pipe),
5166 crtc_state->dpll_hw_state.fp1);
5169 static void i9xx_crtc_enable(struct intel_atomic_state *state,
5170 struct intel_crtc *crtc)
5172 const struct intel_crtc_state *new_crtc_state =
5173 intel_atomic_get_new_crtc_state(state, crtc);
5174 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5175 enum pipe pipe = crtc->pipe;
5177 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
5180 i9xx_set_pll_dividers(new_crtc_state);
5182 if (intel_crtc_has_dp_encoder(new_crtc_state))
5183 intel_dp_set_m_n(new_crtc_state, M1_N1);
5185 intel_set_transcoder_timings(new_crtc_state);
5186 intel_set_pipe_src_size(new_crtc_state);
5188 i9xx_set_pipeconf(new_crtc_state);
5190 crtc->active = true;
5192 if (!IS_GEN(dev_priv, 2))
5193 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5195 intel_encoders_pre_enable(state, crtc);
5197 i9xx_enable_pll(crtc, new_crtc_state);
5199 i9xx_pfit_enable(new_crtc_state);
5201 intel_color_load_luts(new_crtc_state);
5202 intel_color_commit(new_crtc_state);
5203 /* update DSPCNTR to configure gamma for pipe bottom color */
5204 intel_disable_primary_plane(new_crtc_state);
5206 if (dev_priv->display.initial_watermarks)
5207 dev_priv->display.initial_watermarks(state, crtc);
5209 intel_update_watermarks(crtc);
5210 intel_enable_pipe(new_crtc_state);
5212 intel_crtc_vblank_on(new_crtc_state);
5214 intel_encoders_enable(state, crtc);
5216 /* prevents spurious underruns */
5217 if (IS_GEN(dev_priv, 2))
5218 intel_wait_for_vblank(dev_priv, pipe);
5221 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
5223 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
5224 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5226 if (!old_crtc_state->gmch_pfit.control)
5229 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
5231 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
5232 intel_de_read(dev_priv, PFIT_CONTROL));
5233 intel_de_write(dev_priv, PFIT_CONTROL, 0);
5236 static void i9xx_crtc_disable(struct intel_atomic_state *state,
5237 struct intel_crtc *crtc)
5239 struct intel_crtc_state *old_crtc_state =
5240 intel_atomic_get_old_crtc_state(state, crtc);
5241 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5242 enum pipe pipe = crtc->pipe;
5245 * On gen2 planes are double buffered but the pipe isn't, so we must
5246 * wait for planes to fully turn off before disabling the pipe.
5248 if (IS_GEN(dev_priv, 2))
5249 intel_wait_for_vblank(dev_priv, pipe);
5251 intel_encoders_disable(state, crtc);
5253 intel_crtc_vblank_off(old_crtc_state);
5255 intel_disable_pipe(old_crtc_state);
5257 i9xx_pfit_disable(old_crtc_state);
5259 intel_encoders_post_disable(state, crtc);
5261 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
5262 if (IS_CHERRYVIEW(dev_priv))
5263 chv_disable_pll(dev_priv, pipe);
5264 else if (IS_VALLEYVIEW(dev_priv))
5265 vlv_disable_pll(dev_priv, pipe);
5267 i9xx_disable_pll(old_crtc_state);
5270 intel_encoders_post_pll_disable(state, crtc);
5272 if (!IS_GEN(dev_priv, 2))
5273 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5275 if (!dev_priv->display.initial_watermarks)
5276 intel_update_watermarks(crtc);
5278 /* clock the pipe down to 640x480@60 to potentially save power */
5279 if (IS_I830(dev_priv))
5280 i830_enable_pipe(dev_priv, pipe);
5283 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
5284 struct drm_modeset_acquire_ctx *ctx)
5286 struct intel_encoder *encoder;
5287 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5288 struct intel_bw_state *bw_state =
5289 to_intel_bw_state(dev_priv->bw_obj.state);
5290 struct intel_cdclk_state *cdclk_state =
5291 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
5292 struct intel_dbuf_state *dbuf_state =
5293 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
5294 struct intel_crtc_state *crtc_state =
5295 to_intel_crtc_state(crtc->base.state);
5296 struct intel_plane *plane;
5297 struct drm_atomic_state *state;
5298 struct intel_crtc_state *temp_crtc_state;
5299 enum pipe pipe = crtc->pipe;
5302 if (!crtc_state->hw.active)
5305 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5306 const struct intel_plane_state *plane_state =
5307 to_intel_plane_state(plane->base.state);
5309 if (plane_state->uapi.visible)
5310 intel_plane_disable_noatomic(crtc, plane);
5313 state = drm_atomic_state_alloc(&dev_priv->drm);
5315 drm_dbg_kms(&dev_priv->drm,
5316 "failed to disable [CRTC:%d:%s], out of memory",
5317 crtc->base.base.id, crtc->base.name);
5321 state->acquire_ctx = ctx;
5323 /* Everything's already locked, -EDEADLK can't happen. */
5324 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
5325 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
5327 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
5329 dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
5331 drm_atomic_state_put(state);
5333 drm_dbg_kms(&dev_priv->drm,
5334 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
5335 crtc->base.base.id, crtc->base.name);
5337 crtc->active = false;
5338 crtc->base.enabled = false;
5340 drm_WARN_ON(&dev_priv->drm,
5341 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
5342 crtc_state->uapi.active = false;
5343 crtc_state->uapi.connector_mask = 0;
5344 crtc_state->uapi.encoder_mask = 0;
5345 intel_crtc_free_hw_state(crtc_state);
5346 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
5348 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
5349 encoder->base.crtc = NULL;
5351 intel_fbc_disable(crtc);
5352 intel_update_watermarks(crtc);
5353 intel_disable_shared_dpll(crtc_state);
5355 intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
5357 dev_priv->active_pipes &= ~BIT(pipe);
5358 cdclk_state->min_cdclk[pipe] = 0;
5359 cdclk_state->min_voltage_level[pipe] = 0;
5360 cdclk_state->active_pipes &= ~BIT(pipe);
5362 dbuf_state->active_pipes &= ~BIT(pipe);
5364 bw_state->data_rate[pipe] = 0;
5365 bw_state->num_active_planes[pipe] = 0;
5369 * turn all crtc's off, but do not adjust state
5370 * This has to be paired with a call to intel_modeset_setup_hw_state.
5372 int intel_display_suspend(struct drm_device *dev)
5374 struct drm_i915_private *dev_priv = to_i915(dev);
5375 struct drm_atomic_state *state;
5378 state = drm_atomic_helper_suspend(dev);
5379 ret = PTR_ERR_OR_ZERO(state);
5381 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
5384 dev_priv->modeset_restore_state = state;
5388 void intel_encoder_destroy(struct drm_encoder *encoder)
5390 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5392 drm_encoder_cleanup(encoder);
5393 kfree(intel_encoder);
5396 /* Cross check the actual hw state with our own modeset state tracking (and it's
5397 * internal consistency). */
5398 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
5399 struct drm_connector_state *conn_state)
5401 struct intel_connector *connector = to_intel_connector(conn_state->connector);
5402 struct drm_i915_private *i915 = to_i915(connector->base.dev);
5404 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
5405 connector->base.base.id, connector->base.name);
5407 if (connector->get_hw_state(connector)) {
5408 struct intel_encoder *encoder = intel_attached_encoder(connector);
5410 I915_STATE_WARN(!crtc_state,
5411 "connector enabled without attached crtc\n");
5416 I915_STATE_WARN(!crtc_state->hw.active,
5417 "connector is active, but attached crtc isn't\n");
5419 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
5422 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
5423 "atomic encoder doesn't match attached encoder\n");
5425 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
5426 "attached encoder crtc differs from connector crtc\n");
5428 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
5429 "attached crtc is active, but connector isn't\n");
5430 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
5431 "best encoder set without crtc!\n");
5435 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
5437 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5438 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5440 /* IPS only exists on ULT machines and is tied to pipe A. */
5441 if (!hsw_crtc_supports_ips(crtc))
5444 if (!dev_priv->params.enable_ips)
5447 if (crtc_state->pipe_bpp > 24)
5451 * We compare against max which means we must take
5452 * the increased cdclk requirement into account when
5453 * calculating the new cdclk.
5455 * Should measure whether using a lower cdclk w/o IPS
5457 if (IS_BROADWELL(dev_priv) &&
5458 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
5464 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
5466 struct drm_i915_private *dev_priv =
5467 to_i915(crtc_state->uapi.crtc->dev);
5468 struct intel_atomic_state *state =
5469 to_intel_atomic_state(crtc_state->uapi.state);
5471 crtc_state->ips_enabled = false;
5473 if (!hsw_crtc_state_ips_capable(crtc_state))
5477 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
5478 * enabled and disabled dynamically based on package C states,
5479 * user space can't make reliable use of the CRCs, so let's just
5480 * completely disable it.
5482 if (crtc_state->crc_enabled)
5485 /* IPS should be fine as long as at least one plane is enabled. */
5486 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
5489 if (IS_BROADWELL(dev_priv)) {
5490 const struct intel_cdclk_state *cdclk_state;
5492 cdclk_state = intel_atomic_get_cdclk_state(state);
5493 if (IS_ERR(cdclk_state))
5494 return PTR_ERR(cdclk_state);
5496 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
5497 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
5501 crtc_state->ips_enabled = true;
5506 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
5508 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5510 /* GDG double wide on either pipe, otherwise pipe A only */
5511 return INTEL_GEN(dev_priv) < 4 &&
5512 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
5515 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
5517 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
5518 unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
5521 * We only use IF-ID interlacing. If we ever use
5522 * PF-ID we'll need to adjust the pixel_rate here.
5525 if (!crtc_state->pch_pfit.enabled)
5528 pipe_w = crtc_state->pipe_src_w;
5529 pipe_h = crtc_state->pipe_src_h;
5531 pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
5532 pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
5534 if (pipe_w < pfit_w)
5536 if (pipe_h < pfit_h)
5539 if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
5540 !pfit_w || !pfit_h))
5543 return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
5547 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
5548 const struct drm_display_mode *timings)
5550 mode->hdisplay = timings->crtc_hdisplay;
5551 mode->htotal = timings->crtc_htotal;
5552 mode->hsync_start = timings->crtc_hsync_start;
5553 mode->hsync_end = timings->crtc_hsync_end;
5555 mode->vdisplay = timings->crtc_vdisplay;
5556 mode->vtotal = timings->crtc_vtotal;
5557 mode->vsync_start = timings->crtc_vsync_start;
5558 mode->vsync_end = timings->crtc_vsync_end;
5560 mode->flags = timings->flags;
5561 mode->type = DRM_MODE_TYPE_DRIVER;
5563 mode->clock = timings->crtc_clock;
5565 drm_mode_set_name(mode);
5568 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
5570 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5572 if (HAS_GMCH(dev_priv))
5573 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
5574 crtc_state->pixel_rate =
5575 crtc_state->hw.pipe_mode.crtc_clock;
5577 crtc_state->pixel_rate =
5578 ilk_pipe_pixel_rate(crtc_state);
5581 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
5583 struct drm_display_mode *mode = &crtc_state->hw.mode;
5584 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
5585 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
5587 drm_mode_copy(pipe_mode, adjusted_mode);
5589 if (crtc_state->bigjoiner) {
5591 * transcoder is programmed to the full mode,
5592 * but pipe timings are half of the transcoder mode
5594 pipe_mode->crtc_hdisplay /= 2;
5595 pipe_mode->crtc_hblank_start /= 2;
5596 pipe_mode->crtc_hblank_end /= 2;
5597 pipe_mode->crtc_hsync_start /= 2;
5598 pipe_mode->crtc_hsync_end /= 2;
5599 pipe_mode->crtc_htotal /= 2;
5600 pipe_mode->crtc_clock /= 2;
5603 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
5604 intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
5606 intel_crtc_compute_pixel_rate(crtc_state);
5608 drm_mode_copy(mode, adjusted_mode);
5609 mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
5610 mode->vdisplay = crtc_state->pipe_src_h;
5613 static void intel_encoder_get_config(struct intel_encoder *encoder,
5614 struct intel_crtc_state *crtc_state)
5616 encoder->get_config(encoder, crtc_state);
5618 intel_crtc_readout_derived_state(crtc_state);
5621 static int intel_crtc_compute_config(struct intel_crtc *crtc,
5622 struct intel_crtc_state *pipe_config)
5624 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5625 struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
5626 int clock_limit = dev_priv->max_dotclk_freq;
5628 drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
5630 /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
5631 if (pipe_config->bigjoiner) {
5632 pipe_mode->crtc_clock /= 2;
5633 pipe_mode->crtc_hdisplay /= 2;
5634 pipe_mode->crtc_hblank_start /= 2;
5635 pipe_mode->crtc_hblank_end /= 2;
5636 pipe_mode->crtc_hsync_start /= 2;
5637 pipe_mode->crtc_hsync_end /= 2;
5638 pipe_mode->crtc_htotal /= 2;
5639 pipe_config->pipe_src_w /= 2;
5642 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
5644 if (INTEL_GEN(dev_priv) < 4) {
5645 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
5648 * Enable double wide mode when the dot clock
5649 * is > 90% of the (display) core speed.
5651 if (intel_crtc_supports_double_wide(crtc) &&
5652 pipe_mode->crtc_clock > clock_limit) {
5653 clock_limit = dev_priv->max_dotclk_freq;
5654 pipe_config->double_wide = true;
5658 if (pipe_mode->crtc_clock > clock_limit) {
5659 drm_dbg_kms(&dev_priv->drm,
5660 "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
5661 pipe_mode->crtc_clock, clock_limit,
5662 yesno(pipe_config->double_wide));
5667 * Pipe horizontal size must be even in:
5669 * - LVDS dual channel mode
5670 * - Double wide pipe
5672 if (pipe_config->pipe_src_w & 1) {
5673 if (pipe_config->double_wide) {
5674 drm_dbg_kms(&dev_priv->drm,
5675 "Odd pipe source width not supported with double wide pipe\n");
5679 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
5680 intel_is_dual_link_lvds(dev_priv)) {
5681 drm_dbg_kms(&dev_priv->drm,
5682 "Odd pipe source width not supported with dual link LVDS\n");
5687 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
5688 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5690 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
5691 pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
5694 intel_crtc_compute_pixel_rate(pipe_config);
5696 if (pipe_config->has_pch_encoder)
5697 return ilk_fdi_compute_config(crtc, pipe_config);
5703 intel_reduce_m_n_ratio(u32 *num, u32 *den)
5705 while (*num > DATA_LINK_M_N_MASK ||
5706 *den > DATA_LINK_M_N_MASK) {
5712 static void compute_m_n(unsigned int m, unsigned int n,
5713 u32 *ret_m, u32 *ret_n,
5717 * Several DP dongles in particular seem to be fussy about
5718 * too large link M/N values. Give N value as 0x8000 that
5719 * should be acceptable by specific devices. 0x8000 is the
5720 * specified fixed N value for asynchronous clock mode,
5721 * which the devices expect also in synchronous clock mode.
5724 *ret_n = DP_LINK_CONSTANT_N_VALUE;
5726 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5728 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
5729 intel_reduce_m_n_ratio(ret_m, ret_n);
5733 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
5734 int pixel_clock, int link_clock,
5735 struct intel_link_m_n *m_n,
5736 bool constant_n, bool fec_enable)
5738 u32 data_clock = bits_per_pixel * pixel_clock;
5741 data_clock = intel_dp_mode_to_fec_clock(data_clock);
5744 compute_m_n(data_clock,
5745 link_clock * nlanes * 8,
5746 &m_n->gmch_m, &m_n->gmch_n,
5749 compute_m_n(pixel_clock, link_clock,
5750 &m_n->link_m, &m_n->link_n,
5754 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
5757 * There may be no VBT; and if the BIOS enabled SSC we can
5758 * just keep using it to avoid unnecessary flicker. Whereas if the
5759 * BIOS isn't using it, don't assume it will work even if the VBT
5760 * indicates as much.
5762 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5763 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
5767 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
5768 drm_dbg_kms(&dev_priv->drm,
5769 "SSC %s by BIOS, overriding VBT which says %s\n",
5770 enableddisabled(bios_lvds_use_ssc),
5771 enableddisabled(dev_priv->vbt.lvds_use_ssc));
5772 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
5777 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5783 * PLLB opamp always calibrates to max value of 0x3f, force enable it
5784 * and set it to a reasonable value instead.
5786 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5787 reg_val &= 0xffffff00;
5788 reg_val |= 0x00000030;
5789 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5791 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5792 reg_val &= 0x00ffffff;
5793 reg_val |= 0x8c000000;
5794 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5796 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5797 reg_val &= 0xffffff00;
5798 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5800 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5801 reg_val &= 0x00ffffff;
5802 reg_val |= 0xb0000000;
5803 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5806 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
5807 const struct intel_link_m_n *m_n)
5809 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5810 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5811 enum pipe pipe = crtc->pipe;
5813 intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
5814 TU_SIZE(m_n->tu) | m_n->gmch_m);
5815 intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5816 intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5817 intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5820 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
5821 enum transcoder transcoder)
5823 if (IS_HASWELL(dev_priv))
5824 return transcoder == TRANSCODER_EDP;
5827 * Strictly speaking some registers are available before
5828 * gen7, but we only support DRRS on gen7+
5830 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
5833 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
5834 const struct intel_link_m_n *m_n,
5835 const struct intel_link_m_n *m2_n2)
5837 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5838 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5839 enum pipe pipe = crtc->pipe;
5840 enum transcoder transcoder = crtc_state->cpu_transcoder;
5842 if (INTEL_GEN(dev_priv) >= 5) {
5843 intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
5844 TU_SIZE(m_n->tu) | m_n->gmch_m);
5845 intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
5847 intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
5849 intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
5852 * M2_N2 registers are set only if DRRS is supported
5853 * (to make sure the registers are not unnecessarily accessed).
5855 if (m2_n2 && crtc_state->has_drrs &&
5856 transcoder_has_m2_n2(dev_priv, transcoder)) {
5857 intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
5858 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
5859 intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
5861 intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
5863 intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
5867 intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
5868 TU_SIZE(m_n->tu) | m_n->gmch_m);
5869 intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5870 intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
5871 intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
5875 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
5877 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
5878 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
5881 dp_m_n = &crtc_state->dp_m_n;
5882 dp_m2_n2 = &crtc_state->dp_m2_n2;
5883 } else if (m_n == M2_N2) {
5886 * M2_N2 registers are not supported. Hence m2_n2 divider value
5887 * needs to be programmed into M1_N1.
5889 dp_m_n = &crtc_state->dp_m2_n2;
5891 drm_err(&i915->drm, "Unsupported divider value\n");
5895 if (crtc_state->has_pch_encoder)
5896 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
5898 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
5901 static void vlv_prepare_pll(struct intel_crtc *crtc,
5902 const struct intel_crtc_state *pipe_config)
5904 struct drm_device *dev = crtc->base.dev;
5905 struct drm_i915_private *dev_priv = to_i915(dev);
5906 enum pipe pipe = crtc->pipe;
5908 u32 bestn, bestm1, bestm2, bestp1, bestp2;
5909 u32 coreclk, reg_val;
5912 intel_de_write(dev_priv, DPLL(pipe),
5913 pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
5915 /* No need to actually set up the DPLL with DSI */
5916 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5919 vlv_dpio_get(dev_priv);
5921 bestn = pipe_config->dpll.n;
5922 bestm1 = pipe_config->dpll.m1;
5923 bestm2 = pipe_config->dpll.m2;
5924 bestp1 = pipe_config->dpll.p1;
5925 bestp2 = pipe_config->dpll.p2;
5927 /* See eDP HDMI DPIO driver vbios notes doc */
5929 /* PLL B needs special handling */
5931 vlv_pllb_recal_opamp(dev_priv, pipe);
5933 /* Set up Tx target for periodic Rcomp update */
5934 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
5936 /* Disable target IRef on PLL */
5937 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
5938 reg_val &= 0x00ffffff;
5939 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
5941 /* Disable fast lock */
5942 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
5944 /* Set idtafcrecal before PLL is enabled */
5945 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5946 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5947 mdiv |= ((bestn << DPIO_N_SHIFT));
5948 mdiv |= (1 << DPIO_K_SHIFT);
5951 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5952 * but we don't support that).
5953 * Note: don't use the DAC post divider as it seems unstable.
5955 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5956 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5958 mdiv |= DPIO_ENABLE_CALIBRATION;
5959 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5961 /* Set HBR and RBR LPF coefficients */
5962 if (pipe_config->port_clock == 162000 ||
5963 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
5964 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
5965 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5968 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5971 if (intel_crtc_has_dp_encoder(pipe_config)) {
5972 /* Use SSC source */
5974 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5977 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5979 } else { /* HDMI or VGA */
5980 /* Use bend source */
5982 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5985 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5989 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5990 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5991 if (intel_crtc_has_dp_encoder(pipe_config))
5992 coreclk |= 0x01000000;
5993 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5995 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5997 vlv_dpio_put(dev_priv);
6000 static void chv_prepare_pll(struct intel_crtc *crtc,
6001 const struct intel_crtc_state *pipe_config)
6003 struct drm_device *dev = crtc->base.dev;
6004 struct drm_i915_private *dev_priv = to_i915(dev);
6005 enum pipe pipe = crtc->pipe;
6006 enum dpio_channel port = vlv_pipe_to_channel(pipe);
6007 u32 loopfilter, tribuf_calcntr;
6008 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
6012 /* Enable Refclk and SSC */
6013 intel_de_write(dev_priv, DPLL(pipe),
6014 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
6016 /* No need to actually set up the DPLL with DSI */
6017 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6020 bestn = pipe_config->dpll.n;
6021 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
6022 bestm1 = pipe_config->dpll.m1;
6023 bestm2 = pipe_config->dpll.m2 >> 22;
6024 bestp1 = pipe_config->dpll.p1;
6025 bestp2 = pipe_config->dpll.p2;
6026 vco = pipe_config->dpll.vco;
6030 vlv_dpio_get(dev_priv);
6032 /* p1 and p2 divider */
6033 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
6034 5 << DPIO_CHV_S1_DIV_SHIFT |
6035 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
6036 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
6037 1 << DPIO_CHV_K_DIV_SHIFT);
6039 /* Feedback post-divider - m2 */
6040 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
6042 /* Feedback refclk divider - n and m1 */
6043 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
6044 DPIO_CHV_M1_DIV_BY_2 |
6045 1 << DPIO_CHV_N_DIV_SHIFT);
6047 /* M2 fraction division */
6048 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
6050 /* M2 fraction division enable */
6051 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
6052 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
6053 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
6055 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
6056 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
6058 /* Program digital lock detect threshold */
6059 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
6060 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
6061 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
6062 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
6064 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
6065 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
6068 if (vco == 5400000) {
6069 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
6070 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
6071 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
6072 tribuf_calcntr = 0x9;
6073 } else if (vco <= 6200000) {
6074 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
6075 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
6076 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6077 tribuf_calcntr = 0x9;
6078 } else if (vco <= 6480000) {
6079 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
6080 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
6081 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6082 tribuf_calcntr = 0x8;
6084 /* Not supported. Apply the same limits as in the max case */
6085 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
6086 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
6087 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6090 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
6092 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
6093 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
6094 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
6095 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
6098 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
6099 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
6102 vlv_dpio_put(dev_priv);
6106 * vlv_force_pll_on - forcibly enable just the PLL
6107 * @dev_priv: i915 private structure
6108 * @pipe: pipe PLL to enable
6109 * @dpll: PLL configuration
6111 * Enable the PLL for @pipe using the supplied @dpll config. To be used
6112 * in cases where we need the PLL enabled even when @pipe is not going to
6115 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
6116 const struct dpll *dpll)
6118 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6119 struct intel_crtc_state *pipe_config;
6121 pipe_config = intel_crtc_state_alloc(crtc);
6125 pipe_config->cpu_transcoder = (enum transcoder)pipe;
6126 pipe_config->pixel_multiplier = 1;
6127 pipe_config->dpll = *dpll;
6129 if (IS_CHERRYVIEW(dev_priv)) {
6130 chv_compute_dpll(crtc, pipe_config);
6131 chv_prepare_pll(crtc, pipe_config);
6132 chv_enable_pll(crtc, pipe_config);
6134 vlv_compute_dpll(crtc, pipe_config);
6135 vlv_prepare_pll(crtc, pipe_config);
6136 vlv_enable_pll(crtc, pipe_config);
6145 * vlv_force_pll_off - forcibly disable just the PLL
6146 * @dev_priv: i915 private structure
6147 * @pipe: pipe PLL to disable
6149 * Disable the PLL for @pipe. To be used in cases where we need
6150 * the PLL enabled even when @pipe is not going to be enabled.
6152 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
6154 if (IS_CHERRYVIEW(dev_priv))
6155 chv_disable_pll(dev_priv, pipe);
6157 vlv_disable_pll(dev_priv, pipe);
6162 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
6164 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6165 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6166 enum pipe pipe = crtc->pipe;
6167 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
6168 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
6169 u32 crtc_vtotal, crtc_vblank_end;
6172 /* We need to be careful not to changed the adjusted mode, for otherwise
6173 * the hw state checker will get angry at the mismatch. */
6174 crtc_vtotal = adjusted_mode->crtc_vtotal;
6175 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
6177 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6178 /* the chip adds 2 halflines automatically */
6180 crtc_vblank_end -= 1;
6182 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
6183 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6185 vsyncshift = adjusted_mode->crtc_hsync_start -
6186 adjusted_mode->crtc_htotal / 2;
6188 vsyncshift += adjusted_mode->crtc_htotal;
6191 if (INTEL_GEN(dev_priv) > 3)
6192 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
6195 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
6196 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
6197 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
6198 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
6199 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
6200 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
6202 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
6203 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
6204 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
6205 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
6206 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
6207 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
6209 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
6210 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
6211 * documented on the DDI_FUNC_CTL register description, EDP Input Select
6213 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
6214 (pipe == PIPE_B || pipe == PIPE_C))
6215 intel_de_write(dev_priv, VTOTAL(pipe),
6216 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
6220 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
6222 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6223 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6224 enum pipe pipe = crtc->pipe;
6226 /* pipesrc controls the size that is scaled from, which should
6227 * always be the user's requested size.
6229 intel_de_write(dev_priv, PIPESRC(pipe),
6230 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
6233 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
6235 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6236 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
6238 if (IS_GEN(dev_priv, 2))
6241 if (INTEL_GEN(dev_priv) >= 9 ||
6242 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6243 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
6245 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
6248 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
6249 struct intel_crtc_state *pipe_config)
6251 struct drm_device *dev = crtc->base.dev;
6252 struct drm_i915_private *dev_priv = to_i915(dev);
6253 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6256 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
6257 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6258 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6260 if (!transcoder_is_dsi(cpu_transcoder)) {
6261 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
6262 pipe_config->hw.adjusted_mode.crtc_hblank_start =
6264 pipe_config->hw.adjusted_mode.crtc_hblank_end =
6265 ((tmp >> 16) & 0xffff) + 1;
6267 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
6268 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6269 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6271 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
6272 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6273 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6275 if (!transcoder_is_dsi(cpu_transcoder)) {
6276 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
6277 pipe_config->hw.adjusted_mode.crtc_vblank_start =
6279 pipe_config->hw.adjusted_mode.crtc_vblank_end =
6280 ((tmp >> 16) & 0xffff) + 1;
6282 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
6283 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6284 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6286 if (intel_pipe_is_interlaced(pipe_config)) {
6287 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6288 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
6289 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
6293 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
6294 struct intel_crtc_state *pipe_config)
6296 struct drm_device *dev = crtc->base.dev;
6297 struct drm_i915_private *dev_priv = to_i915(dev);
6300 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
6301 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6302 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6305 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
6307 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6308 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6313 /* we keep both pipes enabled on 830 */
6314 if (IS_I830(dev_priv))
6315 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
6317 if (crtc_state->double_wide)
6318 pipeconf |= PIPECONF_DOUBLE_WIDE;
6320 /* only g4x and later have fancy bpc/dither controls */
6321 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
6322 IS_CHERRYVIEW(dev_priv)) {
6323 /* Bspec claims that we can't use dithering for 30bpp pipes. */
6324 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
6325 pipeconf |= PIPECONF_DITHER_EN |
6326 PIPECONF_DITHER_TYPE_SP;
6328 switch (crtc_state->pipe_bpp) {
6330 pipeconf |= PIPECONF_6BPC;
6333 pipeconf |= PIPECONF_8BPC;
6336 pipeconf |= PIPECONF_10BPC;
6339 /* Case prevented by intel_choose_pipe_bpp_dither. */
6344 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6345 if (INTEL_GEN(dev_priv) < 4 ||
6346 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
6347 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6349 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6351 pipeconf |= PIPECONF_PROGRESSIVE;
6354 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
6355 crtc_state->limited_color_range)
6356 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6358 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
6360 pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
6362 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
6363 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
6366 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
6368 if (IS_I830(dev_priv))
6371 return INTEL_GEN(dev_priv) >= 4 ||
6372 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
6375 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
6377 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6378 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6381 if (!i9xx_has_pfit(dev_priv))
6384 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
6385 if (!(tmp & PFIT_ENABLE))
6388 /* Check whether the pfit is attached to our pipe. */
6389 if (INTEL_GEN(dev_priv) < 4) {
6390 if (crtc->pipe != PIPE_B)
6393 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6397 crtc_state->gmch_pfit.control = tmp;
6398 crtc_state->gmch_pfit.pgm_ratios =
6399 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
6402 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6403 struct intel_crtc_state *pipe_config)
6405 struct drm_device *dev = crtc->base.dev;
6406 struct drm_i915_private *dev_priv = to_i915(dev);
6407 enum pipe pipe = crtc->pipe;
6410 int refclk = 100000;
6412 /* In case of DSI, DPLL will not be used */
6413 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6416 vlv_dpio_get(dev_priv);
6417 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6418 vlv_dpio_put(dev_priv);
6420 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6421 clock.m2 = mdiv & DPIO_M2DIV_MASK;
6422 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6423 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6424 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6426 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
6430 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6431 struct intel_initial_plane_config *plane_config)
6433 struct drm_device *dev = crtc->base.dev;
6434 struct drm_i915_private *dev_priv = to_i915(dev);
6435 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6436 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
6438 u32 val, base, offset;
6439 int fourcc, pixel_format;
6440 unsigned int aligned_height;
6441 struct drm_framebuffer *fb;
6442 struct intel_framebuffer *intel_fb;
6444 if (!plane->get_hw_state(plane, &pipe))
6447 drm_WARN_ON(dev, pipe != crtc->pipe);
6449 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6451 drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
6455 fb = &intel_fb->base;
6459 val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
6461 if (INTEL_GEN(dev_priv) >= 4) {
6462 if (val & DISPPLANE_TILED) {
6463 plane_config->tiling = I915_TILING_X;
6464 fb->modifier = I915_FORMAT_MOD_X_TILED;
6467 if (val & DISPPLANE_ROTATE_180)
6468 plane_config->rotation = DRM_MODE_ROTATE_180;
6471 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
6472 val & DISPPLANE_MIRROR)
6473 plane_config->rotation |= DRM_MODE_REFLECT_X;
6475 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6476 fourcc = i9xx_format_to_fourcc(pixel_format);
6477 fb->format = drm_format_info(fourcc);
6479 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
6480 offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
6481 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
6482 } else if (INTEL_GEN(dev_priv) >= 4) {
6483 if (plane_config->tiling)
6484 offset = intel_de_read(dev_priv,
6485 DSPTILEOFF(i9xx_plane));
6487 offset = intel_de_read(dev_priv,
6488 DSPLINOFF(i9xx_plane));
6489 base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
6491 base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
6493 plane_config->base = base;
6495 val = intel_de_read(dev_priv, PIPESRC(pipe));
6496 fb->width = ((val >> 16) & 0xfff) + 1;
6497 fb->height = ((val >> 0) & 0xfff) + 1;
6499 val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
6500 fb->pitches[0] = val & 0xffffffc0;
6502 aligned_height = intel_fb_align_height(fb, 0, fb->height);
6504 plane_config->size = fb->pitches[0] * aligned_height;
6506 drm_dbg_kms(&dev_priv->drm,
6507 "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6508 crtc->base.name, plane->base.name, fb->width, fb->height,
6509 fb->format->cpp[0] * 8, base, fb->pitches[0],
6510 plane_config->size);
6512 plane_config->fb = intel_fb;
6515 static void chv_crtc_clock_get(struct intel_crtc *crtc,
6516 struct intel_crtc_state *pipe_config)
6518 struct drm_device *dev = crtc->base.dev;
6519 struct drm_i915_private *dev_priv = to_i915(dev);
6520 enum pipe pipe = crtc->pipe;
6521 enum dpio_channel port = vlv_pipe_to_channel(pipe);
6523 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
6524 int refclk = 100000;
6526 /* In case of DSI, DPLL will not be used */
6527 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6530 vlv_dpio_get(dev_priv);
6531 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6532 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6533 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6534 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6535 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
6536 vlv_dpio_put(dev_priv);
6538 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6539 clock.m2 = (pll_dw0 & 0xff) << 22;
6540 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
6541 clock.m2 |= pll_dw2 & 0x3fffff;
6542 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6543 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6544 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6546 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
6549 static enum intel_output_format
6550 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
6552 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6555 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
6557 if (tmp & PIPEMISC_YUV420_ENABLE) {
6558 /* We support 4:2:0 in full blend mode only */
6559 drm_WARN_ON(&dev_priv->drm,
6560 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
6562 return INTEL_OUTPUT_FORMAT_YCBCR420;
6563 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
6564 return INTEL_OUTPUT_FORMAT_YCBCR444;
6566 return INTEL_OUTPUT_FORMAT_RGB;
6570 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
6572 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6573 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6574 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6575 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
6578 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
6580 if (tmp & DISPPLANE_GAMMA_ENABLE)
6581 crtc_state->gamma_enable = true;
6583 if (!HAS_GMCH(dev_priv) &&
6584 tmp & DISPPLANE_PIPE_CSC_ENABLE)
6585 crtc_state->csc_enable = true;
6588 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6589 struct intel_crtc_state *pipe_config)
6591 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6592 enum intel_display_power_domain power_domain;
6593 intel_wakeref_t wakeref;
6597 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
6598 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
6602 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6603 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6604 pipe_config->shared_dpll = NULL;
6608 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
6609 if (!(tmp & PIPECONF_ENABLE))
6612 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
6613 IS_CHERRYVIEW(dev_priv)) {
6614 switch (tmp & PIPECONF_BPC_MASK) {
6616 pipe_config->pipe_bpp = 18;
6619 pipe_config->pipe_bpp = 24;
6621 case PIPECONF_10BPC:
6622 pipe_config->pipe_bpp = 30;
6629 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
6630 (tmp & PIPECONF_COLOR_RANGE_SELECT))
6631 pipe_config->limited_color_range = true;
6633 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
6634 PIPECONF_GAMMA_MODE_SHIFT;
6636 if (IS_CHERRYVIEW(dev_priv))
6637 pipe_config->cgm_mode = intel_de_read(dev_priv,
6638 CGM_PIPE_MODE(crtc->pipe));
6640 i9xx_get_pipe_color_config(pipe_config);
6641 intel_color_get_config(pipe_config);
6643 if (INTEL_GEN(dev_priv) < 4)
6644 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6646 intel_get_transcoder_timings(crtc, pipe_config);
6647 intel_get_pipe_src_size(crtc, pipe_config);
6649 i9xx_get_pfit_config(pipe_config);
6651 if (INTEL_GEN(dev_priv) >= 4) {
6652 /* No way to read it out on pipes B and C */
6653 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
6654 tmp = dev_priv->chv_dpll_md[crtc->pipe];
6656 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
6657 pipe_config->pixel_multiplier =
6658 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6659 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6660 pipe_config->dpll_hw_state.dpll_md = tmp;
6661 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
6662 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
6663 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
6664 pipe_config->pixel_multiplier =
6665 ((tmp & SDVO_MULTIPLIER_MASK)
6666 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6668 /* Note that on i915G/GM the pixel multiplier is in the sdvo
6669 * port and will be fixed up in the encoder->get_config
6671 pipe_config->pixel_multiplier = 1;
6673 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
6675 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
6676 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
6678 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
6681 /* Mask out read-only status bits. */
6682 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6683 DPLL_PORTC_READY_MASK |
6684 DPLL_PORTB_READY_MASK);
6687 if (IS_CHERRYVIEW(dev_priv))
6688 chv_crtc_clock_get(crtc, pipe_config);
6689 else if (IS_VALLEYVIEW(dev_priv))
6690 vlv_crtc_clock_get(crtc, pipe_config);
6692 i9xx_crtc_clock_get(crtc, pipe_config);
6695 * Normally the dotclock is filled in by the encoder .get_config()
6696 * but in case the pipe is enabled w/o any ports we need a sane
6699 pipe_config->hw.adjusted_mode.crtc_clock =
6700 pipe_config->port_clock / pipe_config->pixel_multiplier;
6705 intel_display_power_put(dev_priv, power_domain, wakeref);
6710 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
6712 struct intel_encoder *encoder;
6715 bool has_lvds = false;
6716 bool has_cpu_edp = false;
6717 bool has_panel = false;
6718 bool has_ck505 = false;
6719 bool can_ssc = false;
6720 bool using_ssc_source = false;
6722 /* We need to take the global config into account */
6723 for_each_intel_encoder(&dev_priv->drm, encoder) {
6724 switch (encoder->type) {
6725 case INTEL_OUTPUT_LVDS:
6729 case INTEL_OUTPUT_EDP:
6731 if (encoder->port == PORT_A)
6739 if (HAS_PCH_IBX(dev_priv)) {
6740 has_ck505 = dev_priv->vbt.display_clock_mode;
6741 can_ssc = has_ck505;
6747 /* Check if any DPLLs are using the SSC source */
6748 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
6749 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
6751 if (!(temp & DPLL_VCO_ENABLE))
6754 if ((temp & PLL_REF_INPUT_MASK) ==
6755 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6756 using_ssc_source = true;
6761 drm_dbg_kms(&dev_priv->drm,
6762 "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
6763 has_panel, has_lvds, has_ck505, using_ssc_source);
6765 /* Ironlake: try to setup display ref clock before DPLL
6766 * enabling. This is only under driver's control after
6767 * PCH B stepping, previous chipset stepping should be
6768 * ignoring this setting.
6770 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
6772 /* As we must carefully and slowly disable/enable each source in turn,
6773 * compute the final state we want first and check if we need to
6774 * make any changes at all.
6777 final &= ~DREF_NONSPREAD_SOURCE_MASK;
6779 final |= DREF_NONSPREAD_CK505_ENABLE;
6781 final |= DREF_NONSPREAD_SOURCE_ENABLE;
6783 final &= ~DREF_SSC_SOURCE_MASK;
6784 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6785 final &= ~DREF_SSC1_ENABLE;
6788 final |= DREF_SSC_SOURCE_ENABLE;
6790 if (intel_panel_use_ssc(dev_priv) && can_ssc)
6791 final |= DREF_SSC1_ENABLE;
6794 if (intel_panel_use_ssc(dev_priv) && can_ssc)
6795 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6797 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6799 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6800 } else if (using_ssc_source) {
6801 final |= DREF_SSC_SOURCE_ENABLE;
6802 final |= DREF_SSC1_ENABLE;
6808 /* Always enable nonspread source */
6809 val &= ~DREF_NONSPREAD_SOURCE_MASK;
6812 val |= DREF_NONSPREAD_CK505_ENABLE;
6814 val |= DREF_NONSPREAD_SOURCE_ENABLE;
6817 val &= ~DREF_SSC_SOURCE_MASK;
6818 val |= DREF_SSC_SOURCE_ENABLE;
6820 /* SSC must be turned on before enabling the CPU output */
6821 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6822 drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
6823 val |= DREF_SSC1_ENABLE;
6825 val &= ~DREF_SSC1_ENABLE;
6827 /* Get SSC going before enabling the outputs */
6828 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6829 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6832 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6834 /* Enable CPU source on CPU attached eDP */
6836 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6837 drm_dbg_kms(&dev_priv->drm,
6838 "Using SSC on eDP\n");
6839 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6841 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6843 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6845 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6846 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6849 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
6851 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6853 /* Turn off CPU output */
6854 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6856 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6857 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6860 if (!using_ssc_source) {
6861 drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
6863 /* Turn off the SSC source */
6864 val &= ~DREF_SSC_SOURCE_MASK;
6865 val |= DREF_SSC_SOURCE_DISABLE;
6868 val &= ~DREF_SSC1_ENABLE;
6870 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6871 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6876 BUG_ON(val != final);
6879 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
6883 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
6884 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6885 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
6887 if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
6888 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6889 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
6891 tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
6892 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6893 intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
6895 if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
6896 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
6897 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
6900 /* WaMPhyProgramming:hsw */
6901 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6905 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6906 tmp &= ~(0xFF << 24);
6907 tmp |= (0x12 << 24);
6908 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6910 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6912 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6914 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6916 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6918 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6919 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6920 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6922 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6923 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6924 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6926 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6929 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6931 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6934 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6936 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6939 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6941 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6944 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6946 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6947 tmp &= ~(0xFF << 16);
6948 tmp |= (0x1C << 16);
6949 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6951 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6952 tmp &= ~(0xFF << 16);
6953 tmp |= (0x1C << 16);
6954 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6956 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
6958 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
6960 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
6962 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
6964 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
6965 tmp &= ~(0xF << 28);
6967 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
6969 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
6970 tmp &= ~(0xF << 28);
6972 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
6975 /* Implements 3 different sequences from BSpec chapter "Display iCLK
6976 * Programming" based on the parameters passed:
6977 * - Sequence to enable CLKOUT_DP
6978 * - Sequence to enable CLKOUT_DP without spread
6979 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
6981 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
6982 bool with_spread, bool with_fdi)
6986 if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
6987 "FDI requires downspread\n"))
6989 if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
6990 with_fdi, "LP PCH doesn't have FDI\n"))
6993 mutex_lock(&dev_priv->sb_lock);
6995 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6996 tmp &= ~SBI_SSCCTL_DISABLE;
6997 tmp |= SBI_SSCCTL_PATHALT;
6998 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7003 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7004 tmp &= ~SBI_SSCCTL_PATHALT;
7005 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7008 lpt_reset_fdi_mphy(dev_priv);
7009 lpt_program_fdi_mphy(dev_priv);
7013 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
7014 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7015 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7016 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7018 mutex_unlock(&dev_priv->sb_lock);
7021 /* Sequence to disable CLKOUT_DP */
7022 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
7026 mutex_lock(&dev_priv->sb_lock);
7028 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
7029 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7030 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7031 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7033 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7034 if (!(tmp & SBI_SSCCTL_DISABLE)) {
7035 if (!(tmp & SBI_SSCCTL_PATHALT)) {
7036 tmp |= SBI_SSCCTL_PATHALT;
7037 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7040 tmp |= SBI_SSCCTL_DISABLE;
7041 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7044 mutex_unlock(&dev_priv->sb_lock);
7047 #define BEND_IDX(steps) ((50 + (steps)) / 5)
7049 static const u16 sscdivintphase[] = {
7050 [BEND_IDX( 50)] = 0x3B23,
7051 [BEND_IDX( 45)] = 0x3B23,
7052 [BEND_IDX( 40)] = 0x3C23,
7053 [BEND_IDX( 35)] = 0x3C23,
7054 [BEND_IDX( 30)] = 0x3D23,
7055 [BEND_IDX( 25)] = 0x3D23,
7056 [BEND_IDX( 20)] = 0x3E23,
7057 [BEND_IDX( 15)] = 0x3E23,
7058 [BEND_IDX( 10)] = 0x3F23,
7059 [BEND_IDX( 5)] = 0x3F23,
7060 [BEND_IDX( 0)] = 0x0025,
7061 [BEND_IDX( -5)] = 0x0025,
7062 [BEND_IDX(-10)] = 0x0125,
7063 [BEND_IDX(-15)] = 0x0125,
7064 [BEND_IDX(-20)] = 0x0225,
7065 [BEND_IDX(-25)] = 0x0225,
7066 [BEND_IDX(-30)] = 0x0325,
7067 [BEND_IDX(-35)] = 0x0325,
7068 [BEND_IDX(-40)] = 0x0425,
7069 [BEND_IDX(-45)] = 0x0425,
7070 [BEND_IDX(-50)] = 0x0525,
7075 * steps -50 to 50 inclusive, in steps of 5
7076 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
7077 * change in clock period = -(steps / 10) * 5.787 ps
7079 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
7082 int idx = BEND_IDX(steps);
7084 if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
7087 if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
7090 mutex_lock(&dev_priv->sb_lock);
7092 if (steps % 10 != 0)
7096 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
7098 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
7100 tmp |= sscdivintphase[idx];
7101 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
7103 mutex_unlock(&dev_priv->sb_lock);
7108 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
7110 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
7111 u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
7113 if ((ctl & SPLL_PLL_ENABLE) == 0)
7116 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
7117 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
7120 if (IS_BROADWELL(dev_priv) &&
7121 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
7127 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
7128 enum intel_dpll_id id)
7130 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
7131 u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
7133 if ((ctl & WRPLL_PLL_ENABLE) == 0)
7136 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
7139 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
7140 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
7141 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
7147 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
7149 struct intel_encoder *encoder;
7150 bool has_fdi = false;
7152 for_each_intel_encoder(&dev_priv->drm, encoder) {
7153 switch (encoder->type) {
7154 case INTEL_OUTPUT_ANALOG:
7163 * The BIOS may have decided to use the PCH SSC
7164 * reference so we must not disable it until the
7165 * relevant PLLs have stopped relying on it. We'll
7166 * just leave the PCH SSC reference enabled in case
7167 * any active PLL is using it. It will get disabled
7168 * after runtime suspend if we don't have FDI.
7170 * TODO: Move the whole reference clock handling
7171 * to the modeset sequence proper so that we can
7172 * actually enable/disable/reconfigure these things
7173 * safely. To do that we need to introduce a real
7174 * clock hierarchy. That would also allow us to do
7175 * clock bending finally.
7177 dev_priv->pch_ssc_use = 0;
7179 if (spll_uses_pch_ssc(dev_priv)) {
7180 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
7181 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
7184 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
7185 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
7186 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
7189 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
7190 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
7191 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
7194 if (dev_priv->pch_ssc_use)
7198 lpt_bend_clkout_dp(dev_priv, 0);
7199 lpt_enable_clkout_dp(dev_priv, true, true);
7201 lpt_disable_clkout_dp(dev_priv);
7206 * Initialize reference clocks when the driver loads
7208 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
7210 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
7211 ilk_init_pch_refclk(dev_priv);
7212 else if (HAS_PCH_LPT(dev_priv))
7213 lpt_init_pch_refclk(dev_priv);
7216 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
7218 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7219 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7220 enum pipe pipe = crtc->pipe;
7225 switch (crtc_state->pipe_bpp) {
7227 val |= PIPECONF_6BPC;
7230 val |= PIPECONF_8BPC;
7233 val |= PIPECONF_10BPC;
7236 val |= PIPECONF_12BPC;
7239 /* Case prevented by intel_choose_pipe_bpp_dither. */
7243 if (crtc_state->dither)
7244 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7246 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7247 val |= PIPECONF_INTERLACED_ILK;
7249 val |= PIPECONF_PROGRESSIVE;
7252 * This would end up with an odd purple hue over
7253 * the entire display. Make sure we don't do it.
7255 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
7256 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
7258 if (crtc_state->limited_color_range &&
7259 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7260 val |= PIPECONF_COLOR_RANGE_SELECT;
7262 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
7263 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
7265 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
7267 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
7269 intel_de_write(dev_priv, PIPECONF(pipe), val);
7270 intel_de_posting_read(dev_priv, PIPECONF(pipe));
7273 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
7275 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7276 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7277 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7280 if (IS_HASWELL(dev_priv) && crtc_state->dither)
7281 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7283 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7284 val |= PIPECONF_INTERLACED_ILK;
7286 val |= PIPECONF_PROGRESSIVE;
7288 if (IS_HASWELL(dev_priv) &&
7289 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
7290 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
7292 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
7293 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
7296 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
7298 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7299 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7302 switch (crtc_state->pipe_bpp) {
7304 val |= PIPEMISC_DITHER_6_BPC;
7307 val |= PIPEMISC_DITHER_8_BPC;
7310 val |= PIPEMISC_DITHER_10_BPC;
7313 val |= PIPEMISC_DITHER_12_BPC;
7316 MISSING_CASE(crtc_state->pipe_bpp);
7320 if (crtc_state->dither)
7321 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
7323 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7324 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
7325 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
7327 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
7328 val |= PIPEMISC_YUV420_ENABLE |
7329 PIPEMISC_YUV420_MODE_FULL_BLEND;
7331 if (INTEL_GEN(dev_priv) >= 11 &&
7332 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
7333 BIT(PLANE_CURSOR))) == 0)
7334 val |= PIPEMISC_HDR_MODE_PRECISION;
7336 if (INTEL_GEN(dev_priv) >= 12)
7337 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
7339 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
7342 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
7344 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7347 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
7349 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
7350 case PIPEMISC_DITHER_6_BPC:
7352 case PIPEMISC_DITHER_8_BPC:
7354 case PIPEMISC_DITHER_10_BPC:
7356 case PIPEMISC_DITHER_12_BPC:
7364 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
7367 * Account for spread spectrum to avoid
7368 * oversubscribing the link. Max center spread
7369 * is 2.5%; use 5% for safety's sake.
7371 u32 bps = target_clock * bpp * 21 / 20;
7372 return DIV_ROUND_UP(bps, link_bw * 8);
7375 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7376 struct intel_link_m_n *m_n)
7378 struct drm_device *dev = crtc->base.dev;
7379 struct drm_i915_private *dev_priv = to_i915(dev);
7380 enum pipe pipe = crtc->pipe;
7382 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
7383 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
7384 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
7386 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
7387 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
7388 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7391 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7392 enum transcoder transcoder,
7393 struct intel_link_m_n *m_n,
7394 struct intel_link_m_n *m2_n2)
7396 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7397 enum pipe pipe = crtc->pipe;
7399 if (INTEL_GEN(dev_priv) >= 5) {
7400 m_n->link_m = intel_de_read(dev_priv,
7401 PIPE_LINK_M1(transcoder));
7402 m_n->link_n = intel_de_read(dev_priv,
7403 PIPE_LINK_N1(transcoder));
7404 m_n->gmch_m = intel_de_read(dev_priv,
7405 PIPE_DATA_M1(transcoder))
7407 m_n->gmch_n = intel_de_read(dev_priv,
7408 PIPE_DATA_N1(transcoder));
7409 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
7410 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7412 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
7413 m2_n2->link_m = intel_de_read(dev_priv,
7414 PIPE_LINK_M2(transcoder));
7415 m2_n2->link_n = intel_de_read(dev_priv,
7416 PIPE_LINK_N2(transcoder));
7417 m2_n2->gmch_m = intel_de_read(dev_priv,
7418 PIPE_DATA_M2(transcoder))
7420 m2_n2->gmch_n = intel_de_read(dev_priv,
7421 PIPE_DATA_N2(transcoder));
7422 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
7423 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7426 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
7427 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
7428 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
7430 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
7431 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
7432 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7436 void intel_dp_get_m_n(struct intel_crtc *crtc,
7437 struct intel_crtc_state *pipe_config)
7439 if (pipe_config->has_pch_encoder)
7440 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7442 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7443 &pipe_config->dp_m_n,
7444 &pipe_config->dp_m2_n2);
7447 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
7448 struct intel_crtc_state *pipe_config)
7450 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7451 &pipe_config->fdi_m_n, NULL);
7454 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
7457 drm_rect_init(&crtc_state->pch_pfit.dst,
7458 pos >> 16, pos & 0xffff,
7459 size >> 16, size & 0xffff);
7462 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
7464 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7465 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7466 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
7470 /* find scaler attached to this pipe */
7471 for (i = 0; i < crtc->num_scalers; i++) {
7474 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
7475 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
7479 crtc_state->pch_pfit.enabled = true;
7481 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
7482 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
7484 ilk_get_pfit_pos_size(crtc_state, pos, size);
7486 scaler_state->scalers[i].in_use = true;
7490 scaler_state->scaler_id = id;
7492 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
7494 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
7497 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
7499 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7500 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7503 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
7504 if ((ctl & PF_ENABLE) == 0)
7507 crtc_state->pch_pfit.enabled = true;
7509 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
7510 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
7512 ilk_get_pfit_pos_size(crtc_state, pos, size);
7515 * We currently do not free assignements of panel fitters on
7516 * ivb/hsw (since we don't use the higher upscaling modes which
7517 * differentiates them) so just WARN about this case for now.
7519 drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
7520 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
7523 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
7524 struct intel_crtc_state *pipe_config)
7526 struct drm_device *dev = crtc->base.dev;
7527 struct drm_i915_private *dev_priv = to_i915(dev);
7528 enum intel_display_power_domain power_domain;
7529 intel_wakeref_t wakeref;
7533 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
7534 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
7538 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7539 pipe_config->shared_dpll = NULL;
7542 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
7543 if (!(tmp & PIPECONF_ENABLE))
7546 switch (tmp & PIPECONF_BPC_MASK) {
7548 pipe_config->pipe_bpp = 18;
7551 pipe_config->pipe_bpp = 24;
7553 case PIPECONF_10BPC:
7554 pipe_config->pipe_bpp = 30;
7556 case PIPECONF_12BPC:
7557 pipe_config->pipe_bpp = 36;
7563 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7564 pipe_config->limited_color_range = true;
7566 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
7567 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
7568 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
7569 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
7572 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
7576 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
7577 PIPECONF_GAMMA_MODE_SHIFT;
7579 pipe_config->csc_mode = intel_de_read(dev_priv,
7580 PIPE_CSC_MODE(crtc->pipe));
7582 i9xx_get_pipe_color_config(pipe_config);
7583 intel_color_get_config(pipe_config);
7585 if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7586 struct intel_shared_dpll *pll;
7587 enum intel_dpll_id pll_id;
7590 pipe_config->has_pch_encoder = true;
7592 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
7593 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7594 FDI_DP_PORT_WIDTH_SHIFT) + 1;
7596 ilk_get_fdi_m_n_config(crtc, pipe_config);
7598 if (HAS_PCH_IBX(dev_priv)) {
7600 * The pipe->pch transcoder and pch transcoder->pll
7603 pll_id = (enum intel_dpll_id) crtc->pipe;
7605 tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
7606 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7607 pll_id = DPLL_ID_PCH_PLL_B;
7609 pll_id= DPLL_ID_PCH_PLL_A;
7612 pipe_config->shared_dpll =
7613 intel_get_shared_dpll_by_id(dev_priv, pll_id);
7614 pll = pipe_config->shared_dpll;
7616 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7617 &pipe_config->dpll_hw_state);
7618 drm_WARN_ON(dev, !pll_active);
7620 tmp = pipe_config->dpll_hw_state.dpll;
7621 pipe_config->pixel_multiplier =
7622 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7623 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
7625 ilk_pch_clock_get(crtc, pipe_config);
7627 pipe_config->pixel_multiplier = 1;
7630 intel_get_transcoder_timings(crtc, pipe_config);
7631 intel_get_pipe_src_size(crtc, pipe_config);
7633 ilk_get_pfit_config(pipe_config);
7638 intel_display_power_put(dev_priv, power_domain, wakeref);
7643 static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7644 struct intel_crtc_state *pipe_config)
7646 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
7647 enum phy phy = intel_port_to_phy(dev_priv, port);
7648 struct icl_port_dpll *port_dpll;
7649 struct intel_shared_dpll *pll;
7650 enum intel_dpll_id id;
7654 clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
7655 id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);
7657 if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
7660 pll = intel_get_shared_dpll_by_id(dev_priv, id);
7661 port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
7663 port_dpll->pll = pll;
7664 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7665 &port_dpll->hw_state);
7666 drm_WARN_ON(&dev_priv->drm, !pll_active);
7668 icl_set_active_port_dpll(pipe_config, port_dpll_id);
7671 static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7672 struct intel_crtc_state *pipe_config)
7674 enum phy phy = intel_port_to_phy(dev_priv, port);
7675 enum icl_port_dpll_id port_dpll_id;
7676 struct icl_port_dpll *port_dpll;
7677 struct intel_shared_dpll *pll;
7678 enum intel_dpll_id id;
7683 if (intel_phy_is_combo(dev_priv, phy)) {
7686 if (IS_ALDERLAKE_S(dev_priv)) {
7687 reg = ADLS_DPCLKA_CFGCR(phy);
7688 mask = ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy);
7689 shift = ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy);
7690 } else if (IS_ROCKETLAKE(dev_priv)) {
7691 reg = ICL_DPCLKA_CFGCR0;
7692 mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
7693 shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
7695 reg = ICL_DPCLKA_CFGCR0;
7696 mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
7697 shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
7700 temp = intel_de_read(dev_priv, reg) & mask;
7702 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
7703 } else if (intel_phy_is_tc(dev_priv, phy)) {
7704 u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
7706 if (clk_sel == DDI_CLK_SEL_MG) {
7707 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
7709 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
7711 drm_WARN_ON(&dev_priv->drm,
7712 clk_sel < DDI_CLK_SEL_TBT_162);
7713 id = DPLL_ID_ICL_TBTPLL;
7714 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
7717 drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
7721 pll = intel_get_shared_dpll_by_id(dev_priv, id);
7722 port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
7724 port_dpll->pll = pll;
7725 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7726 &port_dpll->hw_state);
7727 drm_WARN_ON(&dev_priv->drm, !pll_active);
7729 icl_set_active_port_dpll(pipe_config, port_dpll_id);
7732 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7733 struct intel_crtc_state *pipe_config)
7735 struct intel_shared_dpll *pll;
7736 enum intel_dpll_id id;
7740 temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
7741 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
7743 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
7746 pll = intel_get_shared_dpll_by_id(dev_priv, id);
7748 pipe_config->shared_dpll = pll;
7749 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7750 &pipe_config->dpll_hw_state);
7751 drm_WARN_ON(&dev_priv->drm, !pll_active);
7754 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
7756 struct intel_crtc_state *pipe_config)
7758 struct intel_shared_dpll *pll;
7759 enum intel_dpll_id id;
7764 id = DPLL_ID_SKL_DPLL0;
7767 id = DPLL_ID_SKL_DPLL1;
7770 id = DPLL_ID_SKL_DPLL2;
7773 drm_err(&dev_priv->drm, "Incorrect port type\n");
7777 pll = intel_get_shared_dpll_by_id(dev_priv, id);
7779 pipe_config->shared_dpll = pll;
7780 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7781 &pipe_config->dpll_hw_state);
7782 drm_WARN_ON(&dev_priv->drm, !pll_active);
7785 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7786 struct intel_crtc_state *pipe_config)
7788 struct intel_shared_dpll *pll;
7789 enum intel_dpll_id id;
7793 temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
7794 id = temp >> (port * 3 + 1);
7796 if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
7799 pll = intel_get_shared_dpll_by_id(dev_priv, id);
7801 pipe_config->shared_dpll = pll;
7802 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7803 &pipe_config->dpll_hw_state);
7804 drm_WARN_ON(&dev_priv->drm, !pll_active);
7807 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7808 struct intel_crtc_state *pipe_config)
7810 struct intel_shared_dpll *pll;
7811 enum intel_dpll_id id;
7812 u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
7815 switch (ddi_pll_sel) {
7816 case PORT_CLK_SEL_WRPLL1:
7817 id = DPLL_ID_WRPLL1;
7819 case PORT_CLK_SEL_WRPLL2:
7820 id = DPLL_ID_WRPLL2;
7822 case PORT_CLK_SEL_SPLL:
7825 case PORT_CLK_SEL_LCPLL_810:
7826 id = DPLL_ID_LCPLL_810;
7828 case PORT_CLK_SEL_LCPLL_1350:
7829 id = DPLL_ID_LCPLL_1350;
7831 case PORT_CLK_SEL_LCPLL_2700:
7832 id = DPLL_ID_LCPLL_2700;
7835 MISSING_CASE(ddi_pll_sel);
7837 case PORT_CLK_SEL_NONE:
7841 pll = intel_get_shared_dpll_by_id(dev_priv, id);
7843 pipe_config->shared_dpll = pll;
7844 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7845 &pipe_config->dpll_hw_state);
7846 drm_WARN_ON(&dev_priv->drm, !pll_active);
7849 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
7850 struct intel_crtc_state *pipe_config,
7851 struct intel_display_power_domain_set *power_domain_set)
7853 struct drm_device *dev = crtc->base.dev;
7854 struct drm_i915_private *dev_priv = to_i915(dev);
7855 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
7856 unsigned long enabled_panel_transcoders = 0;
7857 enum transcoder panel_transcoder;
7860 if (INTEL_GEN(dev_priv) >= 11)
7861 panel_transcoder_mask |=
7862 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
7865 * The pipe->transcoder mapping is fixed with the exception of the eDP
7866 * and DSI transcoders handled below.
7868 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7871 * XXX: Do intel_display_power_get_if_enabled before reading this (for
7872 * consistency and less surprising code; it's in always on power).
7874 for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
7875 panel_transcoder_mask) {
7876 bool force_thru = false;
7877 enum pipe trans_pipe;
7879 tmp = intel_de_read(dev_priv,
7880 TRANS_DDI_FUNC_CTL(panel_transcoder));
7881 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
7885 * Log all enabled ones, only use the first one.
7887 * FIXME: This won't work for two separate DSI displays.
7889 enabled_panel_transcoders |= BIT(panel_transcoder);
7890 if (enabled_panel_transcoders != BIT(panel_transcoder))
7893 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
7896 "unknown pipe linked to transcoder %s\n",
7897 transcoder_name(panel_transcoder));
7899 case TRANS_DDI_EDP_INPUT_A_ONOFF:
7902 case TRANS_DDI_EDP_INPUT_A_ON:
7903 trans_pipe = PIPE_A;
7905 case TRANS_DDI_EDP_INPUT_B_ONOFF:
7906 trans_pipe = PIPE_B;
7908 case TRANS_DDI_EDP_INPUT_C_ONOFF:
7909 trans_pipe = PIPE_C;
7911 case TRANS_DDI_EDP_INPUT_D_ONOFF:
7912 trans_pipe = PIPE_D;
7916 if (trans_pipe == crtc->pipe) {
7917 pipe_config->cpu_transcoder = panel_transcoder;
7918 pipe_config->pch_pfit.force_thru = force_thru;
7923 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
7925 drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
7926 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
7928 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
7929 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7932 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
7934 return tmp & PIPECONF_ENABLE;
7937 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
7938 struct intel_crtc_state *pipe_config,
7939 struct intel_display_power_domain_set *power_domain_set)
7941 struct drm_device *dev = crtc->base.dev;
7942 struct drm_i915_private *dev_priv = to_i915(dev);
7943 enum transcoder cpu_transcoder;
7947 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
7949 cpu_transcoder = TRANSCODER_DSI_A;
7951 cpu_transcoder = TRANSCODER_DSI_C;
7953 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
7954 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
7958 * The PLL needs to be enabled with a valid divider
7959 * configuration, otherwise accessing DSI registers will hang
7960 * the machine. See BSpec North Display Engine
7961 * registers/MIPI[BXT]. We can break out here early, since we
7962 * need the same DSI PLL to be enabled for both DSI ports.
7964 if (!bxt_dsi_pll_is_enabled(dev_priv))
7967 /* XXX: this works for video mode only */
7968 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
7969 if (!(tmp & DPI_ENABLE))
7972 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
7973 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
7976 pipe_config->cpu_transcoder = cpu_transcoder;
7980 return transcoder_is_dsi(pipe_config->cpu_transcoder);
7983 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
7984 struct intel_crtc_state *pipe_config)
7986 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7987 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7991 if (transcoder_is_dsi(cpu_transcoder)) {
7992 port = (cpu_transcoder == TRANSCODER_DSI_A) ?
7995 tmp = intel_de_read(dev_priv,
7996 TRANS_DDI_FUNC_CTL(cpu_transcoder));
7997 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
7999 if (INTEL_GEN(dev_priv) >= 12)
8000 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
8002 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
8005 if (IS_DG1(dev_priv))
8006 dg1_get_ddi_pll(dev_priv, port, pipe_config);
8007 else if (INTEL_GEN(dev_priv) >= 11)
8008 icl_get_ddi_pll(dev_priv, port, pipe_config);
8009 else if (IS_CANNONLAKE(dev_priv))
8010 cnl_get_ddi_pll(dev_priv, port, pipe_config);
8011 else if (IS_GEN9_LP(dev_priv))
8012 bxt_get_ddi_pll(dev_priv, port, pipe_config);
8013 else if (IS_GEN9_BC(dev_priv))
8014 skl_get_ddi_pll(dev_priv, port, pipe_config);
8016 hsw_get_ddi_pll(dev_priv, port, pipe_config);
8019 * Haswell has only FDI/PCH transcoder A. It is which is connected to
8020 * DDI E. So just check whether this pipe is wired to DDI E and whether
8021 * the PCH transcoder is on.
8023 if (INTEL_GEN(dev_priv) < 9 &&
8024 (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
8025 pipe_config->has_pch_encoder = true;
8027 tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
8028 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8029 FDI_DP_PORT_WIDTH_SHIFT) + 1;
8031 ilk_get_fdi_m_n_config(crtc, pipe_config);
8035 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
8036 struct intel_crtc_state *pipe_config)
8038 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8039 struct intel_display_power_domain_set power_domain_set = { };
8043 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
8044 POWER_DOMAIN_PIPE(crtc->pipe)))
8047 pipe_config->shared_dpll = NULL;
8049 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
8051 if (IS_GEN9_LP(dev_priv) &&
8052 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
8053 drm_WARN_ON(&dev_priv->drm, active);
8057 intel_dsc_get_config(pipe_config);
8060 /* bigjoiner slave doesn't enable transcoder */
8061 if (!pipe_config->bigjoiner_slave)
8065 pipe_config->pixel_multiplier = 1;
8067 /* we cannot read out most state, so don't bother.. */
8068 pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
8069 } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
8070 INTEL_GEN(dev_priv) >= 11) {
8071 hsw_get_ddi_port_state(crtc, pipe_config);
8072 intel_get_transcoder_timings(crtc, pipe_config);
8075 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
8076 intel_vrr_get_config(crtc, pipe_config);
8078 intel_get_pipe_src_size(crtc, pipe_config);
8080 if (IS_HASWELL(dev_priv)) {
8081 u32 tmp = intel_de_read(dev_priv,
8082 PIPECONF(pipe_config->cpu_transcoder));
8084 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
8085 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
8087 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8089 pipe_config->output_format =
8090 bdw_get_pipemisc_output_format(crtc);
8093 pipe_config->gamma_mode = intel_de_read(dev_priv,
8094 GAMMA_MODE(crtc->pipe));
8096 pipe_config->csc_mode = intel_de_read(dev_priv,
8097 PIPE_CSC_MODE(crtc->pipe));
8099 if (INTEL_GEN(dev_priv) >= 9) {
8100 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
8102 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
8103 pipe_config->gamma_enable = true;
8105 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
8106 pipe_config->csc_enable = true;
8108 i9xx_get_pipe_color_config(pipe_config);
8111 intel_color_get_config(pipe_config);
8113 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
8114 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
8115 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8116 pipe_config->ips_linetime =
8117 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
8119 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
8120 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
8121 if (INTEL_GEN(dev_priv) >= 9)
8122 skl_get_pfit_config(pipe_config);
8124 ilk_get_pfit_config(pipe_config);
8127 if (hsw_crtc_supports_ips(crtc)) {
8128 if (IS_HASWELL(dev_priv))
8129 pipe_config->ips_enabled = intel_de_read(dev_priv,
8130 IPS_CTL) & IPS_ENABLE;
8133 * We cannot readout IPS state on broadwell, set to
8134 * true so we can set it to a defined state on first
8137 pipe_config->ips_enabled = true;
8141 if (pipe_config->bigjoiner_slave) {
8142 /* Cannot be read out as a slave, set to 0. */
8143 pipe_config->pixel_multiplier = 0;
8144 } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
8145 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
8146 pipe_config->pixel_multiplier =
8147 intel_de_read(dev_priv,
8148 PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
8150 pipe_config->pixel_multiplier = 1;
8154 intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
8159 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
8161 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8162 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8164 if (!i915->display.get_pipe_config(crtc, crtc_state))
8167 crtc_state->hw.active = true;
8169 intel_crtc_readout_derived_state(crtc_state);
8174 /* VESA 640x480x72Hz mode to set on the pipe */
8175 static const struct drm_display_mode load_detect_mode = {
8176 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8177 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8180 struct drm_framebuffer *
8181 intel_framebuffer_create(struct drm_i915_gem_object *obj,
8182 struct drm_mode_fb_cmd2 *mode_cmd)
8184 struct intel_framebuffer *intel_fb;
8187 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8189 return ERR_PTR(-ENOMEM);
8191 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
8195 return &intel_fb->base;
8199 return ERR_PTR(ret);
8202 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
8203 struct drm_crtc *crtc)
8205 struct drm_plane *plane;
8206 struct drm_plane_state *plane_state;
8209 ret = drm_atomic_add_affected_planes(state, crtc);
8213 for_each_new_plane_in_state(state, plane, plane_state, i) {
8214 if (plane_state->crtc != crtc)
8217 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
8221 drm_atomic_set_fb_for_plane(plane_state, NULL);
8227 int intel_get_load_detect_pipe(struct drm_connector *connector,
8228 struct intel_load_detect_pipe *old,
8229 struct drm_modeset_acquire_ctx *ctx)
8231 struct intel_crtc *intel_crtc;
8232 struct intel_encoder *intel_encoder =
8233 intel_attached_encoder(to_intel_connector(connector));
8234 struct drm_crtc *possible_crtc;
8235 struct drm_encoder *encoder = &intel_encoder->base;
8236 struct drm_crtc *crtc = NULL;
8237 struct drm_device *dev = encoder->dev;
8238 struct drm_i915_private *dev_priv = to_i915(dev);
8239 struct drm_mode_config *config = &dev->mode_config;
8240 struct drm_atomic_state *state = NULL, *restore_state = NULL;
8241 struct drm_connector_state *connector_state;
8242 struct intel_crtc_state *crtc_state;
8245 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8246 connector->base.id, connector->name,
8247 encoder->base.id, encoder->name);
8249 old->restore_state = NULL;
8251 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
8254 * Algorithm gets a little messy:
8256 * - if the connector already has an assigned crtc, use it (but make
8257 * sure it's on first)
8259 * - try to find the first unused crtc that can drive this connector,
8260 * and use that if we find one
8263 /* See if we already have a CRTC for this connector */
8264 if (connector->state->crtc) {
8265 crtc = connector->state->crtc;
8267 ret = drm_modeset_lock(&crtc->mutex, ctx);
8271 /* Make sure the crtc and connector are running */
8275 /* Find an unused one (if possible) */
8276 for_each_crtc(dev, possible_crtc) {
8278 if (!(encoder->possible_crtcs & (1 << i)))
8281 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
8285 if (possible_crtc->state->enable) {
8286 drm_modeset_unlock(&possible_crtc->mutex);
8290 crtc = possible_crtc;
8295 * If we didn't find an unused CRTC, don't use any.
8298 drm_dbg_kms(&dev_priv->drm,
8299 "no pipe available for load-detect\n");
8305 intel_crtc = to_intel_crtc(crtc);
8307 state = drm_atomic_state_alloc(dev);
8308 restore_state = drm_atomic_state_alloc(dev);
8309 if (!state || !restore_state) {
8314 state->acquire_ctx = ctx;
8315 restore_state->acquire_ctx = ctx;
8317 connector_state = drm_atomic_get_connector_state(state, connector);
8318 if (IS_ERR(connector_state)) {
8319 ret = PTR_ERR(connector_state);
8323 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
8327 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
8328 if (IS_ERR(crtc_state)) {
8329 ret = PTR_ERR(crtc_state);
8333 crtc_state->uapi.active = true;
8335 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
8340 ret = intel_modeset_disable_planes(state, crtc);
8344 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
8346 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
8348 ret = drm_atomic_add_affected_planes(restore_state, crtc);
8350 drm_dbg_kms(&dev_priv->drm,
8351 "Failed to create a copy of old state to restore: %i\n",
8356 ret = drm_atomic_commit(state);
8358 drm_dbg_kms(&dev_priv->drm,
8359 "failed to set mode on load-detect pipe\n");
8363 old->restore_state = restore_state;
8364 drm_atomic_state_put(state);
8366 /* let the connector get through one full cycle before testing */
8367 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
8372 drm_atomic_state_put(state);
8375 if (restore_state) {
8376 drm_atomic_state_put(restore_state);
8377 restore_state = NULL;
8380 if (ret == -EDEADLK)
8386 void intel_release_load_detect_pipe(struct drm_connector *connector,
8387 struct intel_load_detect_pipe *old,
8388 struct drm_modeset_acquire_ctx *ctx)
8390 struct intel_encoder *intel_encoder =
8391 intel_attached_encoder(to_intel_connector(connector));
8392 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
8393 struct drm_encoder *encoder = &intel_encoder->base;
8394 struct drm_atomic_state *state = old->restore_state;
8397 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8398 connector->base.id, connector->name,
8399 encoder->base.id, encoder->name);
8404 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
8406 drm_dbg_kms(&i915->drm,
8407 "Couldn't release load detect pipe: %i\n", ret);
8408 drm_atomic_state_put(state);
8411 static int i9xx_pll_refclk(struct drm_device *dev,
8412 const struct intel_crtc_state *pipe_config)
8414 struct drm_i915_private *dev_priv = to_i915(dev);
8415 u32 dpll = pipe_config->dpll_hw_state.dpll;
8417 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8418 return dev_priv->vbt.lvds_ssc_freq;
8419 else if (HAS_PCH_SPLIT(dev_priv))
8421 else if (!IS_GEN(dev_priv, 2))
8427 /* Returns the clock of the currently programmed mode of the given pipe. */
8428 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8429 struct intel_crtc_state *pipe_config)
8431 struct drm_device *dev = crtc->base.dev;
8432 struct drm_i915_private *dev_priv = to_i915(dev);
8433 enum pipe pipe = crtc->pipe;
8434 u32 dpll = pipe_config->dpll_hw_state.dpll;
8438 int refclk = i9xx_pll_refclk(dev, pipe_config);
8440 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8441 fp = pipe_config->dpll_hw_state.fp0;
8443 fp = pipe_config->dpll_hw_state.fp1;
8445 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8446 if (IS_PINEVIEW(dev_priv)) {
8447 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8448 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8450 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8451 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8454 if (!IS_GEN(dev_priv, 2)) {
8455 if (IS_PINEVIEW(dev_priv))
8456 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8457 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8459 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8460 DPLL_FPA01_P1_POST_DIV_SHIFT);
8462 switch (dpll & DPLL_MODE_MASK) {
8463 case DPLLB_MODE_DAC_SERIAL:
8464 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8467 case DPLLB_MODE_LVDS:
8468 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8472 drm_dbg_kms(&dev_priv->drm,
8473 "Unknown DPLL mode %08x in programmed "
8474 "mode\n", (int)(dpll & DPLL_MODE_MASK));
8478 if (IS_PINEVIEW(dev_priv))
8479 port_clock = pnv_calc_dpll_params(refclk, &clock);
8481 port_clock = i9xx_calc_dpll_params(refclk, &clock);
8483 u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
8485 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8488 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8489 DPLL_FPA01_P1_POST_DIV_SHIFT);
8491 if (lvds & LVDS_CLKB_POWER_UP)
8496 if (dpll & PLL_P1_DIVIDE_BY_TWO)
8499 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8500 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8502 if (dpll & PLL_P2_DIVIDE_BY_4)
8508 port_clock = i9xx_calc_dpll_params(refclk, &clock);
8512 * This value includes pixel_multiplier. We will use
8513 * port_clock to compute adjusted_mode.crtc_clock in the
8514 * encoder's get_config() function.
8516 pipe_config->port_clock = port_clock;
8519 int intel_dotclock_calculate(int link_freq,
8520 const struct intel_link_m_n *m_n)
8523 * The calculation for the data clock is:
8524 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8525 * But we want to avoid losing precison if possible, so:
8526 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8528 * and the link clock is simpler:
8529 * link_clock = (m * link_clock) / n
8535 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
8538 static void ilk_pch_clock_get(struct intel_crtc *crtc,
8539 struct intel_crtc_state *pipe_config)
8541 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8543 /* read out port_clock from the DPLL */
8544 i9xx_crtc_clock_get(crtc, pipe_config);
8547 * In case there is an active pipe without active ports,
8548 * we may need some idea for the dotclock anyway.
8549 * Calculate one based on the FDI configuration.
8551 pipe_config->hw.adjusted_mode.crtc_clock =
8552 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8553 &pipe_config->fdi_m_n);
8556 /* Returns the currently programmed mode of the given encoder. */
8557 struct drm_display_mode *
8558 intel_encoder_current_mode(struct intel_encoder *encoder)
8560 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
8561 struct intel_crtc_state *crtc_state;
8562 struct drm_display_mode *mode;
8563 struct intel_crtc *crtc;
8566 if (!encoder->get_hw_state(encoder, &pipe))
8569 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8571 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8575 crtc_state = intel_crtc_state_alloc(crtc);
8581 if (!intel_crtc_get_pipe_config(crtc_state)) {
8587 intel_encoder_get_config(encoder, crtc_state);
8589 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
8597 * intel_wm_need_update - Check whether watermarks need updating
8598 * @cur: current plane state
8599 * @new: new plane state
8601 * Check current plane state versus the new one to determine whether
8602 * watermarks need to be recalculated.
8604 * Returns true or false.
8606 static bool intel_wm_need_update(const struct intel_plane_state *cur,
8607 struct intel_plane_state *new)
8609 /* Update watermarks on tiling or size changes. */
8610 if (new->uapi.visible != cur->uapi.visible)
8613 if (!cur->hw.fb || !new->hw.fb)
8616 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
8617 cur->hw.rotation != new->hw.rotation ||
8618 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
8619 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
8620 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
8621 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
8627 static bool needs_scaling(const struct intel_plane_state *state)
8629 int src_w = drm_rect_width(&state->uapi.src) >> 16;
8630 int src_h = drm_rect_height(&state->uapi.src) >> 16;
8631 int dst_w = drm_rect_width(&state->uapi.dst);
8632 int dst_h = drm_rect_height(&state->uapi.dst);
8634 return (src_w != dst_w || src_h != dst_h);
8637 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
8638 struct intel_crtc_state *crtc_state,
8639 const struct intel_plane_state *old_plane_state,
8640 struct intel_plane_state *plane_state)
8642 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8643 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
8644 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8645 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8646 bool was_crtc_enabled = old_crtc_state->hw.active;
8647 bool is_crtc_enabled = crtc_state->hw.active;
8648 bool turn_off, turn_on, visible, was_visible;
8651 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
8652 ret = skl_update_scaler_plane(crtc_state, plane_state);
8657 was_visible = old_plane_state->uapi.visible;
8658 visible = plane_state->uapi.visible;
8660 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
8661 was_visible = false;
8664 * Visibility is calculated as if the crtc was on, but
8665 * after scaler setup everything depends on it being off
8666 * when the crtc isn't active.
8668 * FIXME this is wrong for watermarks. Watermarks should also
8669 * be computed as if the pipe would be active. Perhaps move
8670 * per-plane wm computation to the .check_plane() hook, and
8671 * only combine the results from all planes in the current place?
8673 if (!is_crtc_enabled) {
8674 intel_plane_set_invisible(crtc_state, plane_state);
8678 if (!was_visible && !visible)
8681 turn_off = was_visible && (!visible || mode_changed);
8682 turn_on = visible && (!was_visible || mode_changed);
8684 drm_dbg_atomic(&dev_priv->drm,
8685 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
8686 crtc->base.base.id, crtc->base.name,
8687 plane->base.base.id, plane->base.name,
8688 was_visible, visible,
8689 turn_off, turn_on, mode_changed);
8692 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
8693 crtc_state->update_wm_pre = true;
8695 /* must disable cxsr around plane enable/disable */
8696 if (plane->id != PLANE_CURSOR)
8697 crtc_state->disable_cxsr = true;
8698 } else if (turn_off) {
8699 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
8700 crtc_state->update_wm_post = true;
8702 /* must disable cxsr around plane enable/disable */
8703 if (plane->id != PLANE_CURSOR)
8704 crtc_state->disable_cxsr = true;
8705 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
8706 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
8707 /* FIXME bollocks */
8708 crtc_state->update_wm_pre = true;
8709 crtc_state->update_wm_post = true;
8713 if (visible || was_visible)
8714 crtc_state->fb_bits |= plane->frontbuffer_bit;
8717 * ILK/SNB DVSACNTR/Sprite Enable
8718 * IVB SPR_CTL/Sprite Enable
8719 * "When in Self Refresh Big FIFO mode, a write to enable the
8720 * plane will be internally buffered and delayed while Big FIFO
8723 * Which means that enabling the sprite can take an extra frame
8724 * when we start in big FIFO mode (LP1+). Thus we need to drop
8725 * down to LP0 and wait for vblank in order to make sure the
8726 * sprite gets enabled on the next vblank after the register write.
8727 * Doing otherwise would risk enabling the sprite one frame after
8728 * we've already signalled flip completion. We can resume LP1+
8729 * once the sprite has been enabled.
8732 * WaCxSRDisabledForSpriteScaling:ivb
8733 * IVB SPR_SCALE/Scaling Enable
8734 * "Low Power watermarks must be disabled for at least one
8735 * frame before enabling sprite scaling, and kept disabled
8736 * until sprite scaling is disabled."
8738 * ILK/SNB DVSASCALE/Scaling Enable
8739 * "When in Self Refresh Big FIFO mode, scaling enable will be
8740 * masked off while Big FIFO mode is exiting."
8742 * Despite the w/a only being listed for IVB we assume that
8743 * the ILK/SNB note has similar ramifications, hence we apply
8744 * the w/a on all three platforms.
8746 * With experimental results seems this is needed also for primary
8747 * plane, not only sprite plane.
8749 if (plane->id != PLANE_CURSOR &&
8750 (IS_GEN_RANGE(dev_priv, 5, 6) ||
8751 IS_IVYBRIDGE(dev_priv)) &&
8752 (turn_on || (!needs_scaling(old_plane_state) &&
8753 needs_scaling(plane_state))))
8754 crtc_state->disable_lp_wm = true;
8759 static bool encoders_cloneable(const struct intel_encoder *a,
8760 const struct intel_encoder *b)
8762 /* masks could be asymmetric, so check both ways */
8763 return a == b || (a->cloneable & (1 << b->type) &&
8764 b->cloneable & (1 << a->type));
8767 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
8768 struct intel_crtc *crtc,
8769 struct intel_encoder *encoder)
8771 struct intel_encoder *source_encoder;
8772 struct drm_connector *connector;
8773 struct drm_connector_state *connector_state;
8776 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
8777 if (connector_state->crtc != &crtc->base)
8781 to_intel_encoder(connector_state->best_encoder);
8782 if (!encoders_cloneable(encoder, source_encoder))
8789 static int icl_add_linked_planes(struct intel_atomic_state *state)
8791 struct intel_plane *plane, *linked;
8792 struct intel_plane_state *plane_state, *linked_plane_state;
8795 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8796 linked = plane_state->planar_linked_plane;
8801 linked_plane_state = intel_atomic_get_plane_state(state, linked);
8802 if (IS_ERR(linked_plane_state))
8803 return PTR_ERR(linked_plane_state);
8805 drm_WARN_ON(state->base.dev,
8806 linked_plane_state->planar_linked_plane != plane);
8807 drm_WARN_ON(state->base.dev,
8808 linked_plane_state->planar_slave == plane_state->planar_slave);
8814 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
8816 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8817 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8818 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
8819 struct intel_plane *plane, *linked;
8820 struct intel_plane_state *plane_state;
8823 if (INTEL_GEN(dev_priv) < 11)
8827 * Destroy all old plane links and make the slave plane invisible
8828 * in the crtc_state->active_planes mask.
8830 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8831 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
8834 plane_state->planar_linked_plane = NULL;
8835 if (plane_state->planar_slave && !plane_state->uapi.visible) {
8836 crtc_state->enabled_planes &= ~BIT(plane->id);
8837 crtc_state->active_planes &= ~BIT(plane->id);
8838 crtc_state->update_planes |= BIT(plane->id);
8841 plane_state->planar_slave = false;
8844 if (!crtc_state->nv12_planes)
8847 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8848 struct intel_plane_state *linked_state = NULL;
8850 if (plane->pipe != crtc->pipe ||
8851 !(crtc_state->nv12_planes & BIT(plane->id)))
8854 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
8855 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
8858 if (crtc_state->active_planes & BIT(linked->id))
8861 linked_state = intel_atomic_get_plane_state(state, linked);
8862 if (IS_ERR(linked_state))
8863 return PTR_ERR(linked_state);
8868 if (!linked_state) {
8869 drm_dbg_kms(&dev_priv->drm,
8870 "Need %d free Y planes for planar YUV\n",
8871 hweight8(crtc_state->nv12_planes));
8876 plane_state->planar_linked_plane = linked;
8878 linked_state->planar_slave = true;
8879 linked_state->planar_linked_plane = plane;
8880 crtc_state->enabled_planes |= BIT(linked->id);
8881 crtc_state->active_planes |= BIT(linked->id);
8882 crtc_state->update_planes |= BIT(linked->id);
8883 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
8884 linked->base.name, plane->base.name);
8886 /* Copy parameters to slave plane */
8887 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
8888 linked_state->color_ctl = plane_state->color_ctl;
8889 linked_state->view = plane_state->view;
8890 memcpy(linked_state->color_plane, plane_state->color_plane,
8891 sizeof(linked_state->color_plane));
8893 intel_plane_copy_hw_state(linked_state, plane_state);
8894 linked_state->uapi.src = plane_state->uapi.src;
8895 linked_state->uapi.dst = plane_state->uapi.dst;
8897 if (icl_is_hdr_plane(dev_priv, plane->id)) {
8898 if (linked->id == PLANE_SPRITE5)
8899 plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
8900 else if (linked->id == PLANE_SPRITE4)
8901 plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
8902 else if (linked->id == PLANE_SPRITE3)
8903 plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
8904 else if (linked->id == PLANE_SPRITE2)
8905 plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
8907 MISSING_CASE(linked->id);
8914 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
8916 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
8917 struct intel_atomic_state *state =
8918 to_intel_atomic_state(new_crtc_state->uapi.state);
8919 const struct intel_crtc_state *old_crtc_state =
8920 intel_atomic_get_old_crtc_state(state, crtc);
8922 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
8925 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
8927 const struct drm_display_mode *pipe_mode =
8928 &crtc_state->hw.pipe_mode;
8931 if (!crtc_state->hw.enable)
8934 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
8935 pipe_mode->crtc_clock);
8937 return min(linetime_wm, 0x1ff);
8940 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
8941 const struct intel_cdclk_state *cdclk_state)
8943 const struct drm_display_mode *pipe_mode =
8944 &crtc_state->hw.pipe_mode;
8947 if (!crtc_state->hw.enable)
8950 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
8951 cdclk_state->logical.cdclk);
8953 return min(linetime_wm, 0x1ff);
8956 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
8958 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8959 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8960 const struct drm_display_mode *pipe_mode =
8961 &crtc_state->hw.pipe_mode;
8964 if (!crtc_state->hw.enable)
8967 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
8968 crtc_state->pixel_rate);
8970 /* Display WA #1135: BXT:ALL GLK:ALL */
8971 if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
8974 return min(linetime_wm, 0x1ff);
8977 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
8978 struct intel_crtc *crtc)
8980 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8981 struct intel_crtc_state *crtc_state =
8982 intel_atomic_get_new_crtc_state(state, crtc);
8983 const struct intel_cdclk_state *cdclk_state;
8985 if (INTEL_GEN(dev_priv) >= 9)
8986 crtc_state->linetime = skl_linetime_wm(crtc_state);
8988 crtc_state->linetime = hsw_linetime_wm(crtc_state);
8990 if (!hsw_crtc_supports_ips(crtc))
8993 cdclk_state = intel_atomic_get_cdclk_state(state);
8994 if (IS_ERR(cdclk_state))
8995 return PTR_ERR(cdclk_state);
8997 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
9003 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
9004 struct intel_crtc *crtc)
9006 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9007 struct intel_crtc_state *crtc_state =
9008 intel_atomic_get_new_crtc_state(state, crtc);
9009 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9012 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
9013 mode_changed && !crtc_state->hw.active)
9014 crtc_state->update_wm_post = true;
9016 if (mode_changed && crtc_state->hw.enable &&
9017 dev_priv->display.crtc_compute_clock &&
9018 !crtc_state->bigjoiner_slave &&
9019 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
9020 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
9026 * May need to update pipe gamma enable bits
9027 * when C8 planes are getting enabled/disabled.
9029 if (c8_planes_changed(crtc_state))
9030 crtc_state->uapi.color_mgmt_changed = true;
9032 if (mode_changed || crtc_state->update_pipe ||
9033 crtc_state->uapi.color_mgmt_changed) {
9034 ret = intel_color_check(crtc_state);
9039 if (dev_priv->display.compute_pipe_wm) {
9040 ret = dev_priv->display.compute_pipe_wm(crtc_state);
9042 drm_dbg_kms(&dev_priv->drm,
9043 "Target pipe watermarks are invalid\n");
9048 if (dev_priv->display.compute_intermediate_wm) {
9049 if (drm_WARN_ON(&dev_priv->drm,
9050 !dev_priv->display.compute_pipe_wm))
9054 * Calculate 'intermediate' watermarks that satisfy both the
9055 * old state and the new state. We can program these
9058 ret = dev_priv->display.compute_intermediate_wm(crtc_state);
9060 drm_dbg_kms(&dev_priv->drm,
9061 "No valid intermediate pipe watermarks are possible\n");
9066 if (INTEL_GEN(dev_priv) >= 9) {
9067 if (mode_changed || crtc_state->update_pipe) {
9068 ret = skl_update_scaler_crtc(crtc_state);
9073 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
9078 if (HAS_IPS(dev_priv)) {
9079 ret = hsw_compute_ips_config(crtc_state);
9084 if (INTEL_GEN(dev_priv) >= 9 ||
9085 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
9086 ret = hsw_compute_linetime_wm(state, crtc);
9092 if (!mode_changed) {
9093 ret = intel_psr2_sel_fetch_update(state, crtc);
9101 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
9103 struct intel_connector *connector;
9104 struct drm_connector_list_iter conn_iter;
9106 drm_connector_list_iter_begin(dev, &conn_iter);
9107 for_each_intel_connector_iter(connector, &conn_iter) {
9108 if (connector->base.state->crtc)
9109 drm_connector_put(&connector->base);
9111 if (connector->base.encoder) {
9112 connector->base.state->best_encoder =
9113 connector->base.encoder;
9114 connector->base.state->crtc =
9115 connector->base.encoder->crtc;
9117 drm_connector_get(&connector->base);
9119 connector->base.state->best_encoder = NULL;
9120 connector->base.state->crtc = NULL;
9123 drm_connector_list_iter_end(&conn_iter);
9127 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
9128 struct intel_crtc_state *pipe_config)
9130 struct drm_connector *connector = conn_state->connector;
9131 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
9132 const struct drm_display_info *info = &connector->display_info;
9135 switch (conn_state->max_bpc) {
9149 MISSING_CASE(conn_state->max_bpc);
9153 if (bpp < pipe_config->pipe_bpp) {
9154 drm_dbg_kms(&i915->drm,
9155 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
9156 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
9157 connector->base.id, connector->name,
9159 3 * conn_state->max_requested_bpc,
9160 pipe_config->pipe_bpp);
9162 pipe_config->pipe_bpp = bpp;
9169 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9170 struct intel_crtc_state *pipe_config)
9172 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9173 struct drm_atomic_state *state = pipe_config->uapi.state;
9174 struct drm_connector *connector;
9175 struct drm_connector_state *connector_state;
9178 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9179 IS_CHERRYVIEW(dev_priv)))
9181 else if (INTEL_GEN(dev_priv) >= 5)
9186 pipe_config->pipe_bpp = bpp;
9188 /* Clamp display bpp to connector max bpp */
9189 for_each_new_connector_in_state(state, connector, connector_state, i) {
9192 if (connector_state->crtc != &crtc->base)
9195 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
9203 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
9204 const struct drm_display_mode *mode)
9206 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
9207 "type: 0x%x flags: 0x%x\n",
9209 mode->crtc_hdisplay, mode->crtc_hsync_start,
9210 mode->crtc_hsync_end, mode->crtc_htotal,
9211 mode->crtc_vdisplay, mode->crtc_vsync_start,
9212 mode->crtc_vsync_end, mode->crtc_vtotal,
9213 mode->type, mode->flags);
9217 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
9218 const char *id, unsigned int lane_count,
9219 const struct intel_link_m_n *m_n)
9221 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
9223 drm_dbg_kms(&i915->drm,
9224 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9226 m_n->gmch_m, m_n->gmch_n,
9227 m_n->link_m, m_n->link_n, m_n->tu);
9231 intel_dump_infoframe(struct drm_i915_private *dev_priv,
9232 const union hdmi_infoframe *frame)
9234 if (!drm_debug_enabled(DRM_UT_KMS))
9237 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
9241 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
9242 const struct drm_dp_vsc_sdp *vsc)
9244 if (!drm_debug_enabled(DRM_UT_KMS))
9247 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
9250 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
9252 static const char * const output_type_str[] = {
9253 OUTPUT_TYPE(UNUSED),
9254 OUTPUT_TYPE(ANALOG),
9264 OUTPUT_TYPE(DP_MST),
9269 static void snprintf_output_types(char *buf, size_t len,
9270 unsigned int output_types)
9277 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
9280 if ((output_types & BIT(i)) == 0)
9283 r = snprintf(str, len, "%s%s",
9284 str != buf ? "," : "", output_type_str[i]);
9290 output_types &= ~BIT(i);
9293 WARN_ON_ONCE(output_types != 0);
9296 static const char * const output_format_str[] = {
9297 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
9298 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
9299 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
9300 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
9303 static const char *output_formats(enum intel_output_format format)
9305 if (format >= ARRAY_SIZE(output_format_str))
9306 format = INTEL_OUTPUT_FORMAT_INVALID;
9307 return output_format_str[format];
9310 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
9312 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
9313 struct drm_i915_private *i915 = to_i915(plane->base.dev);
9314 const struct drm_framebuffer *fb = plane_state->hw.fb;
9315 struct drm_format_name_buf format_name;
9318 drm_dbg_kms(&i915->drm,
9319 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
9320 plane->base.base.id, plane->base.name,
9321 yesno(plane_state->uapi.visible));
9325 drm_dbg_kms(&i915->drm,
9326 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
9327 plane->base.base.id, plane->base.name,
9328 fb->base.id, fb->width, fb->height,
9329 drm_get_format_name(fb->format->format, &format_name),
9330 fb->modifier, yesno(plane_state->uapi.visible));
9331 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
9332 plane_state->hw.rotation, plane_state->scaler_id);
9333 if (plane_state->uapi.visible)
9334 drm_dbg_kms(&i915->drm,
9335 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
9336 DRM_RECT_FP_ARG(&plane_state->uapi.src),
9337 DRM_RECT_ARG(&plane_state->uapi.dst));
9340 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
9341 struct intel_atomic_state *state,
9342 const char *context)
9344 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
9345 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9346 const struct intel_plane_state *plane_state;
9347 struct intel_plane *plane;
9351 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
9352 crtc->base.base.id, crtc->base.name,
9353 yesno(pipe_config->hw.enable), context);
9355 if (!pipe_config->hw.enable)
9358 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
9359 drm_dbg_kms(&dev_priv->drm,
9360 "active: %s, output_types: %s (0x%x), output format: %s\n",
9361 yesno(pipe_config->hw.active),
9362 buf, pipe_config->output_types,
9363 output_formats(pipe_config->output_format));
9365 drm_dbg_kms(&dev_priv->drm,
9366 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
9367 transcoder_name(pipe_config->cpu_transcoder),
9368 pipe_config->pipe_bpp, pipe_config->dither);
9370 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
9371 transcoder_name(pipe_config->mst_master_transcoder));
9373 drm_dbg_kms(&dev_priv->drm,
9374 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
9375 transcoder_name(pipe_config->master_transcoder),
9376 pipe_config->sync_mode_slaves_mask);
9378 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
9379 pipe_config->bigjoiner_slave ? "slave" :
9380 pipe_config->bigjoiner ? "master" : "no");
9382 if (pipe_config->has_pch_encoder)
9383 intel_dump_m_n_config(pipe_config, "fdi",
9384 pipe_config->fdi_lanes,
9385 &pipe_config->fdi_m_n);
9387 if (intel_crtc_has_dp_encoder(pipe_config)) {
9388 intel_dump_m_n_config(pipe_config, "dp m_n",
9389 pipe_config->lane_count, &pipe_config->dp_m_n);
9390 if (pipe_config->has_drrs)
9391 intel_dump_m_n_config(pipe_config, "dp m2_n2",
9392 pipe_config->lane_count,
9393 &pipe_config->dp_m2_n2);
9396 drm_dbg_kms(&dev_priv->drm,
9397 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
9398 pipe_config->has_audio, pipe_config->has_infoframe,
9399 pipe_config->infoframes.enable);
9401 if (pipe_config->infoframes.enable &
9402 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
9403 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
9404 pipe_config->infoframes.gcp);
9405 if (pipe_config->infoframes.enable &
9406 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
9407 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
9408 if (pipe_config->infoframes.enable &
9409 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
9410 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
9411 if (pipe_config->infoframes.enable &
9412 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
9413 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
9414 if (pipe_config->infoframes.enable &
9415 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
9416 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
9417 if (pipe_config->infoframes.enable &
9418 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
9419 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
9420 if (pipe_config->infoframes.enable &
9421 intel_hdmi_infoframe_enable(DP_SDP_VSC))
9422 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
9424 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
9425 yesno(pipe_config->vrr.enable),
9426 pipe_config->vrr.vmin, pipe_config->vrr.vmax,
9427 pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
9428 intel_vrr_vmin_vblank_start(pipe_config),
9429 intel_vrr_vmax_vblank_start(pipe_config));
9431 drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
9432 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
9433 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
9434 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
9435 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
9436 drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
9437 drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
9438 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
9439 drm_dbg_kms(&dev_priv->drm,
9440 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
9441 pipe_config->port_clock,
9442 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
9443 pipe_config->pixel_rate);
9445 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
9446 pipe_config->linetime, pipe_config->ips_linetime);
9448 if (INTEL_GEN(dev_priv) >= 9)
9449 drm_dbg_kms(&dev_priv->drm,
9450 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
9452 pipe_config->scaler_state.scaler_users,
9453 pipe_config->scaler_state.scaler_id);
9455 if (HAS_GMCH(dev_priv))
9456 drm_dbg_kms(&dev_priv->drm,
9457 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9458 pipe_config->gmch_pfit.control,
9459 pipe_config->gmch_pfit.pgm_ratios,
9460 pipe_config->gmch_pfit.lvds_border_bits);
9462 drm_dbg_kms(&dev_priv->drm,
9463 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
9464 DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
9465 enableddisabled(pipe_config->pch_pfit.enabled),
9466 yesno(pipe_config->pch_pfit.force_thru));
9468 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
9469 pipe_config->ips_enabled, pipe_config->double_wide);
9471 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
9473 if (IS_CHERRYVIEW(dev_priv))
9474 drm_dbg_kms(&dev_priv->drm,
9475 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
9476 pipe_config->cgm_mode, pipe_config->gamma_mode,
9477 pipe_config->gamma_enable, pipe_config->csc_enable);
9479 drm_dbg_kms(&dev_priv->drm,
9480 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
9481 pipe_config->csc_mode, pipe_config->gamma_mode,
9482 pipe_config->gamma_enable, pipe_config->csc_enable);
9484 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
9485 pipe_config->hw.degamma_lut ?
9486 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
9487 pipe_config->hw.gamma_lut ?
9488 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
9494 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9495 if (plane->pipe == crtc->pipe)
9496 intel_dump_plane_state(plane_state);
9500 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
9502 struct drm_device *dev = state->base.dev;
9503 struct drm_connector *connector;
9504 struct drm_connector_list_iter conn_iter;
9505 unsigned int used_ports = 0;
9506 unsigned int used_mst_ports = 0;
9510 * We're going to peek into connector->state,
9511 * hence connection_mutex must be held.
9513 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
9516 * Walk the connector list instead of the encoder
9517 * list to detect the problem on ddi platforms
9518 * where there's just one encoder per digital port.
9520 drm_connector_list_iter_begin(dev, &conn_iter);
9521 drm_for_each_connector_iter(connector, &conn_iter) {
9522 struct drm_connector_state *connector_state;
9523 struct intel_encoder *encoder;
9526 drm_atomic_get_new_connector_state(&state->base,
9528 if (!connector_state)
9529 connector_state = connector->state;
9531 if (!connector_state->best_encoder)
9534 encoder = to_intel_encoder(connector_state->best_encoder);
9536 drm_WARN_ON(dev, !connector_state->crtc);
9538 switch (encoder->type) {
9539 case INTEL_OUTPUT_DDI:
9540 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
9543 case INTEL_OUTPUT_DP:
9544 case INTEL_OUTPUT_HDMI:
9545 case INTEL_OUTPUT_EDP:
9546 /* the same port mustn't appear more than once */
9547 if (used_ports & BIT(encoder->port))
9550 used_ports |= BIT(encoder->port);
9552 case INTEL_OUTPUT_DP_MST:
9560 drm_connector_list_iter_end(&conn_iter);
9562 /* can't mix MST and SST/HDMI on the same port */
9563 if (used_ports & used_mst_ports)
9570 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
9571 struct intel_crtc_state *crtc_state)
9573 const struct intel_crtc_state *from_crtc_state = crtc_state;
9575 if (crtc_state->bigjoiner_slave) {
9576 from_crtc_state = intel_atomic_get_new_crtc_state(state,
9577 crtc_state->bigjoiner_linked_crtc);
9579 /* No need to copy state if the master state is unchanged */
9580 if (!from_crtc_state)
9584 intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
9588 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
9589 struct intel_crtc_state *crtc_state)
9591 crtc_state->hw.enable = crtc_state->uapi.enable;
9592 crtc_state->hw.active = crtc_state->uapi.active;
9593 crtc_state->hw.mode = crtc_state->uapi.mode;
9594 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
9595 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
9597 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
9600 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
9602 if (crtc_state->bigjoiner_slave)
9605 crtc_state->uapi.enable = crtc_state->hw.enable;
9606 crtc_state->uapi.active = crtc_state->hw.active;
9607 drm_WARN_ON(crtc_state->uapi.crtc->dev,
9608 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
9610 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
9611 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
9613 /* copy color blobs to uapi */
9614 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
9615 crtc_state->hw.degamma_lut);
9616 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
9617 crtc_state->hw.gamma_lut);
9618 drm_property_replace_blob(&crtc_state->uapi.ctm,
9619 crtc_state->hw.ctm);
9623 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
9624 const struct intel_crtc_state *from_crtc_state)
9626 struct intel_crtc_state *saved_state;
9627 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9629 saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
9633 saved_state->uapi = crtc_state->uapi;
9634 saved_state->scaler_state = crtc_state->scaler_state;
9635 saved_state->shared_dpll = crtc_state->shared_dpll;
9636 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
9637 saved_state->crc_enabled = crtc_state->crc_enabled;
9639 intel_crtc_free_hw_state(crtc_state);
9640 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
9643 /* Re-init hw state */
9644 memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
9645 crtc_state->hw.enable = from_crtc_state->hw.enable;
9646 crtc_state->hw.active = from_crtc_state->hw.active;
9647 crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
9648 crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
9651 crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
9652 crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
9653 crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
9654 crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
9655 crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
9656 crtc_state->bigjoiner_slave = true;
9657 crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
9658 crtc_state->has_audio = false;
9664 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
9665 struct intel_crtc_state *crtc_state)
9667 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9668 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9669 struct intel_crtc_state *saved_state;
9671 saved_state = intel_crtc_state_alloc(crtc);
9675 /* free the old crtc_state->hw members */
9676 intel_crtc_free_hw_state(crtc_state);
9678 /* FIXME: before the switch to atomic started, a new pipe_config was
9679 * kzalloc'd. Code that depends on any field being zero should be
9680 * fixed, so that the crtc_state can be safely duplicated. For now,
9681 * only fields that are know to not cause problems are preserved. */
9683 saved_state->uapi = crtc_state->uapi;
9684 saved_state->scaler_state = crtc_state->scaler_state;
9685 saved_state->shared_dpll = crtc_state->shared_dpll;
9686 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
9687 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
9688 sizeof(saved_state->icl_port_dplls));
9689 saved_state->crc_enabled = crtc_state->crc_enabled;
9690 if (IS_G4X(dev_priv) ||
9691 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
9692 saved_state->wm = crtc_state->wm;
9694 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
9697 intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
9703 intel_modeset_pipe_config(struct intel_atomic_state *state,
9704 struct intel_crtc_state *pipe_config)
9706 struct drm_crtc *crtc = pipe_config->uapi.crtc;
9707 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
9708 struct drm_connector *connector;
9709 struct drm_connector_state *connector_state;
9710 int base_bpp, ret, i;
9713 pipe_config->cpu_transcoder =
9714 (enum transcoder) to_intel_crtc(crtc)->pipe;
9717 * Sanitize sync polarity flags based on requested ones. If neither
9718 * positive or negative polarity is requested, treat this as meaning
9719 * negative polarity.
9721 if (!(pipe_config->hw.adjusted_mode.flags &
9722 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9723 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9725 if (!(pipe_config->hw.adjusted_mode.flags &
9726 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9727 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9729 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9734 base_bpp = pipe_config->pipe_bpp;
9737 * Determine the real pipe dimensions. Note that stereo modes can
9738 * increase the actual pipe size due to the frame doubling and
9739 * insertion of additional space for blanks between the frame. This
9740 * is stored in the crtc timings. We use the requested mode to do this
9741 * computation to clearly distinguish it from the adjusted mode, which
9742 * can be changed by the connectors in the below retry loop.
9744 drm_mode_get_hv_timing(&pipe_config->hw.mode,
9745 &pipe_config->pipe_src_w,
9746 &pipe_config->pipe_src_h);
9748 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
9749 struct intel_encoder *encoder =
9750 to_intel_encoder(connector_state->best_encoder);
9752 if (connector_state->crtc != crtc)
9755 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
9756 drm_dbg_kms(&i915->drm,
9757 "rejecting invalid cloning configuration\n");
9762 * Determine output_types before calling the .compute_config()
9763 * hooks so that the hooks can use this information safely.
9765 if (encoder->compute_output_type)
9766 pipe_config->output_types |=
9767 BIT(encoder->compute_output_type(encoder, pipe_config,
9770 pipe_config->output_types |= BIT(encoder->type);
9774 /* Ensure the port clock defaults are reset when retrying. */
9775 pipe_config->port_clock = 0;
9776 pipe_config->pixel_multiplier = 1;
9778 /* Fill in default crtc timings, allow encoders to overwrite them. */
9779 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
9780 CRTC_STEREO_DOUBLE);
9782 /* Pass our mode to the connectors and the CRTC to give them a chance to
9783 * adjust it according to limitations or connector properties, and also
9784 * a chance to reject the mode entirely.
9786 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
9787 struct intel_encoder *encoder =
9788 to_intel_encoder(connector_state->best_encoder);
9790 if (connector_state->crtc != crtc)
9793 ret = encoder->compute_config(encoder, pipe_config,
9796 if (ret != -EDEADLK)
9797 drm_dbg_kms(&i915->drm,
9798 "Encoder config failure: %d\n",
9804 /* Set default port clock if not overwritten by the encoder. Needs to be
9805 * done afterwards in case the encoder adjusts the mode. */
9806 if (!pipe_config->port_clock)
9807 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
9808 * pipe_config->pixel_multiplier;
9810 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9811 if (ret == -EDEADLK)
9814 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
9818 if (ret == I915_DISPLAY_CONFIG_RETRY) {
9819 if (drm_WARN(&i915->drm, !retry,
9820 "loop in pipe configuration computation\n"))
9823 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
9828 /* Dithering seems to not pass-through bits correctly when it should, so
9829 * only enable it on 6bpc panels and when its not a compliance
9830 * test requesting 6bpc video pattern.
9832 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
9833 !pipe_config->dither_force_disable;
9834 drm_dbg_kms(&i915->drm,
9835 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
9836 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9842 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
9844 struct intel_atomic_state *state =
9845 to_intel_atomic_state(crtc_state->uapi.state);
9846 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9847 struct drm_connector_state *conn_state;
9848 struct drm_connector *connector;
9851 for_each_new_connector_in_state(&state->base, connector,
9853 struct intel_encoder *encoder =
9854 to_intel_encoder(conn_state->best_encoder);
9857 if (conn_state->crtc != &crtc->base ||
9858 !encoder->compute_config_late)
9861 ret = encoder->compute_config_late(encoder, crtc_state,
9870 bool intel_fuzzy_clock_check(int clock1, int clock2)
9874 if (clock1 == clock2)
9877 if (!clock1 || !clock2)
9880 diff = abs(clock1 - clock2);
9882 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
9889 intel_compare_m_n(unsigned int m, unsigned int n,
9890 unsigned int m2, unsigned int n2,
9893 if (m == m2 && n == n2)
9896 if (exact || !m || !n || !m2 || !n2)
9899 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
9906 } else if (n < n2) {
9916 return intel_fuzzy_clock_check(m, m2);
9920 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
9921 const struct intel_link_m_n *m2_n2,
9924 return m_n->tu == m2_n2->tu &&
9925 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
9926 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
9927 intel_compare_m_n(m_n->link_m, m_n->link_n,
9928 m2_n2->link_m, m2_n2->link_n, exact);
9932 intel_compare_infoframe(const union hdmi_infoframe *a,
9933 const union hdmi_infoframe *b)
9935 return memcmp(a, b, sizeof(*a)) == 0;
9939 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
9940 const struct drm_dp_vsc_sdp *b)
9942 return memcmp(a, b, sizeof(*a)) == 0;
9946 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
9947 bool fastset, const char *name,
9948 const union hdmi_infoframe *a,
9949 const union hdmi_infoframe *b)
9952 if (!drm_debug_enabled(DRM_UT_KMS))
9955 drm_dbg_kms(&dev_priv->drm,
9956 "fastset mismatch in %s infoframe\n", name);
9957 drm_dbg_kms(&dev_priv->drm, "expected:\n");
9958 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
9959 drm_dbg_kms(&dev_priv->drm, "found:\n");
9960 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
9962 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
9963 drm_err(&dev_priv->drm, "expected:\n");
9964 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
9965 drm_err(&dev_priv->drm, "found:\n");
9966 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
9971 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
9972 bool fastset, const char *name,
9973 const struct drm_dp_vsc_sdp *a,
9974 const struct drm_dp_vsc_sdp *b)
9977 if (!drm_debug_enabled(DRM_UT_KMS))
9980 drm_dbg_kms(&dev_priv->drm,
9981 "fastset mismatch in %s dp sdp\n", name);
9982 drm_dbg_kms(&dev_priv->drm, "expected:\n");
9983 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
9984 drm_dbg_kms(&dev_priv->drm, "found:\n");
9985 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
9987 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
9988 drm_err(&dev_priv->drm, "expected:\n");
9989 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
9990 drm_err(&dev_priv->drm, "found:\n");
9991 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
9995 static void __printf(4, 5)
9996 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
9997 const char *name, const char *format, ...)
9999 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
10000 struct va_format vaf;
10003 va_start(args, format);
10008 drm_dbg_kms(&i915->drm,
10009 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
10010 crtc->base.base.id, crtc->base.name, name, &vaf);
10012 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
10013 crtc->base.base.id, crtc->base.name, name, &vaf);
10018 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
10020 if (dev_priv->params.fastboot != -1)
10021 return dev_priv->params.fastboot;
10023 /* Enable fastboot by default on Skylake and newer */
10024 if (INTEL_GEN(dev_priv) >= 9)
10027 /* Enable fastboot by default on VLV and CHV */
10028 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
10031 /* Disabled by default on all others */
10036 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
10037 const struct intel_crtc_state *pipe_config,
10040 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
10041 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
10044 bool fixup_inherited = fastset &&
10045 current_config->inherited && !pipe_config->inherited;
10047 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
10048 drm_dbg_kms(&dev_priv->drm,
10049 "initial modeset and fastboot not set\n");
10053 #define PIPE_CONF_CHECK_X(name) do { \
10054 if (current_config->name != pipe_config->name) { \
10055 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10056 "(expected 0x%08x, found 0x%08x)", \
10057 current_config->name, \
10058 pipe_config->name); \
10063 #define PIPE_CONF_CHECK_I(name) do { \
10064 if (current_config->name != pipe_config->name) { \
10065 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10066 "(expected %i, found %i)", \
10067 current_config->name, \
10068 pipe_config->name); \
10073 #define PIPE_CONF_CHECK_BOOL(name) do { \
10074 if (current_config->name != pipe_config->name) { \
10075 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10076 "(expected %s, found %s)", \
10077 yesno(current_config->name), \
10078 yesno(pipe_config->name)); \
10084 * Checks state where we only read out the enabling, but not the entire
10085 * state itself (like full infoframes or ELD for audio). These states
10086 * require a full modeset on bootup to fix up.
10088 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
10089 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
10090 PIPE_CONF_CHECK_BOOL(name); \
10092 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10093 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
10094 yesno(current_config->name), \
10095 yesno(pipe_config->name)); \
10100 #define PIPE_CONF_CHECK_P(name) do { \
10101 if (current_config->name != pipe_config->name) { \
10102 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10103 "(expected %p, found %p)", \
10104 current_config->name, \
10105 pipe_config->name); \
10110 #define PIPE_CONF_CHECK_M_N(name) do { \
10111 if (!intel_compare_link_m_n(¤t_config->name, \
10112 &pipe_config->name,\
10114 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10115 "(expected tu %i gmch %i/%i link %i/%i, " \
10116 "found tu %i, gmch %i/%i link %i/%i)", \
10117 current_config->name.tu, \
10118 current_config->name.gmch_m, \
10119 current_config->name.gmch_n, \
10120 current_config->name.link_m, \
10121 current_config->name.link_n, \
10122 pipe_config->name.tu, \
10123 pipe_config->name.gmch_m, \
10124 pipe_config->name.gmch_n, \
10125 pipe_config->name.link_m, \
10126 pipe_config->name.link_n); \
10131 /* This is required for BDW+ where there is only one set of registers for
10132 * switching between high and low RR.
10133 * This macro can be used whenever a comparison has to be made between one
10134 * hw state and multiple sw state variables.
10136 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
10137 if (!intel_compare_link_m_n(¤t_config->name, \
10138 &pipe_config->name, !fastset) && \
10139 !intel_compare_link_m_n(¤t_config->alt_name, \
10140 &pipe_config->name, !fastset)) { \
10141 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10142 "(expected tu %i gmch %i/%i link %i/%i, " \
10143 "or tu %i gmch %i/%i link %i/%i, " \
10144 "found tu %i, gmch %i/%i link %i/%i)", \
10145 current_config->name.tu, \
10146 current_config->name.gmch_m, \
10147 current_config->name.gmch_n, \
10148 current_config->name.link_m, \
10149 current_config->name.link_n, \
10150 current_config->alt_name.tu, \
10151 current_config->alt_name.gmch_m, \
10152 current_config->alt_name.gmch_n, \
10153 current_config->alt_name.link_m, \
10154 current_config->alt_name.link_n, \
10155 pipe_config->name.tu, \
10156 pipe_config->name.gmch_m, \
10157 pipe_config->name.gmch_n, \
10158 pipe_config->name.link_m, \
10159 pipe_config->name.link_n); \
10164 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
10165 if ((current_config->name ^ pipe_config->name) & (mask)) { \
10166 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10167 "(%x) (expected %i, found %i)", \
10169 current_config->name & (mask), \
10170 pipe_config->name & (mask)); \
10175 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
10176 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10177 pipe_config_mismatch(fastset, crtc, __stringify(name), \
10178 "(expected %i, found %i)", \
10179 current_config->name, \
10180 pipe_config->name); \
10185 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
10186 if (!intel_compare_infoframe(¤t_config->infoframes.name, \
10187 &pipe_config->infoframes.name)) { \
10188 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
10189 ¤t_config->infoframes.name, \
10190 &pipe_config->infoframes.name); \
10195 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
10196 if (!current_config->has_psr && !pipe_config->has_psr && \
10197 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \
10198 &pipe_config->infoframes.name)) { \
10199 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
10200 ¤t_config->infoframes.name, \
10201 &pipe_config->infoframes.name); \
10206 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
10207 if (current_config->name1 != pipe_config->name1) { \
10208 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
10209 "(expected %i, found %i, won't compare lut values)", \
10210 current_config->name1, \
10211 pipe_config->name1); \
10214 if (!intel_color_lut_equal(current_config->name2, \
10215 pipe_config->name2, pipe_config->name1, \
10216 bit_precision)) { \
10217 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
10218 "hw_state doesn't match sw_state"); \
10224 #define PIPE_CONF_QUIRK(quirk) \
10225 ((current_config->quirks | pipe_config->quirks) & (quirk))
10227 PIPE_CONF_CHECK_I(cpu_transcoder);
10229 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
10230 PIPE_CONF_CHECK_I(fdi_lanes);
10231 PIPE_CONF_CHECK_M_N(fdi_m_n);
10233 PIPE_CONF_CHECK_I(lane_count);
10234 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
10236 if (INTEL_GEN(dev_priv) < 8) {
10237 PIPE_CONF_CHECK_M_N(dp_m_n);
10239 if (current_config->has_drrs)
10240 PIPE_CONF_CHECK_M_N(dp_m2_n2);
10242 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
10244 PIPE_CONF_CHECK_X(output_types);
10246 /* FIXME do the readout properly and get rid of this quirk */
10247 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
10248 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
10249 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
10250 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
10251 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
10252 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
10253 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
10255 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
10256 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
10257 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
10258 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
10259 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
10260 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
10262 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
10263 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
10264 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
10265 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
10266 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
10267 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
10269 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
10270 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
10271 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
10272 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
10273 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
10274 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
10276 PIPE_CONF_CHECK_I(pixel_multiplier);
10278 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
10279 DRM_MODE_FLAG_INTERLACE);
10281 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10282 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
10283 DRM_MODE_FLAG_PHSYNC);
10284 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
10285 DRM_MODE_FLAG_NHSYNC);
10286 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
10287 DRM_MODE_FLAG_PVSYNC);
10288 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
10289 DRM_MODE_FLAG_NVSYNC);
10293 PIPE_CONF_CHECK_I(output_format);
10294 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
10295 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
10296 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
10297 PIPE_CONF_CHECK_BOOL(limited_color_range);
10299 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
10300 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
10301 PIPE_CONF_CHECK_BOOL(has_infoframe);
10302 /* FIXME do the readout properly and get rid of this quirk */
10303 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
10304 PIPE_CONF_CHECK_BOOL(fec_enable);
10306 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
10308 PIPE_CONF_CHECK_X(gmch_pfit.control);
10309 /* pfit ratios are autocomputed by the hw on gen4+ */
10310 if (INTEL_GEN(dev_priv) < 4)
10311 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
10312 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
10315 * Changing the EDP transcoder input mux
10316 * (A_ONOFF vs. A_ON) requires a full modeset.
10318 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
10321 PIPE_CONF_CHECK_I(pipe_src_w);
10322 PIPE_CONF_CHECK_I(pipe_src_h);
10324 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
10325 if (current_config->pch_pfit.enabled) {
10326 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
10327 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
10328 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
10329 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
10332 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
10333 /* FIXME do the readout properly and get rid of this quirk */
10334 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
10335 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
10337 PIPE_CONF_CHECK_X(gamma_mode);
10338 if (IS_CHERRYVIEW(dev_priv))
10339 PIPE_CONF_CHECK_X(cgm_mode);
10341 PIPE_CONF_CHECK_X(csc_mode);
10342 PIPE_CONF_CHECK_BOOL(gamma_enable);
10343 PIPE_CONF_CHECK_BOOL(csc_enable);
10345 PIPE_CONF_CHECK_I(linetime);
10346 PIPE_CONF_CHECK_I(ips_linetime);
10348 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
10350 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
10353 PIPE_CONF_CHECK_BOOL(double_wide);
10355 PIPE_CONF_CHECK_P(shared_dpll);
10357 /* FIXME do the readout properly and get rid of this quirk */
10358 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
10359 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10360 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10361 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10362 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10363 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
10364 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
10365 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
10366 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
10367 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
10368 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
10369 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
10370 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
10371 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
10372 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
10373 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
10374 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
10375 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
10376 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
10377 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
10378 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
10379 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
10380 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
10381 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
10382 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
10383 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
10384 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
10385 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
10386 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
10387 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
10388 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
10389 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
10391 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
10392 PIPE_CONF_CHECK_X(dsi_pll.div);
10394 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
10395 PIPE_CONF_CHECK_I(pipe_bpp);
10397 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
10398 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
10399 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10401 PIPE_CONF_CHECK_I(min_voltage_level);
10404 PIPE_CONF_CHECK_X(infoframes.enable);
10405 PIPE_CONF_CHECK_X(infoframes.gcp);
10406 PIPE_CONF_CHECK_INFOFRAME(avi);
10407 PIPE_CONF_CHECK_INFOFRAME(spd);
10408 PIPE_CONF_CHECK_INFOFRAME(hdmi);
10409 PIPE_CONF_CHECK_INFOFRAME(drm);
10410 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
10412 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
10413 PIPE_CONF_CHECK_I(master_transcoder);
10414 PIPE_CONF_CHECK_BOOL(bigjoiner);
10415 PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
10416 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
10418 PIPE_CONF_CHECK_I(dsc.compression_enable);
10419 PIPE_CONF_CHECK_I(dsc.dsc_split);
10420 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
10422 PIPE_CONF_CHECK_I(mst_master_transcoder);
10424 PIPE_CONF_CHECK_BOOL(vrr.enable);
10425 PIPE_CONF_CHECK_I(vrr.vmin);
10426 PIPE_CONF_CHECK_I(vrr.vmax);
10427 PIPE_CONF_CHECK_I(vrr.flipline);
10428 PIPE_CONF_CHECK_I(vrr.pipeline_full);
10430 #undef PIPE_CONF_CHECK_X
10431 #undef PIPE_CONF_CHECK_I
10432 #undef PIPE_CONF_CHECK_BOOL
10433 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
10434 #undef PIPE_CONF_CHECK_P
10435 #undef PIPE_CONF_CHECK_FLAGS
10436 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
10437 #undef PIPE_CONF_CHECK_COLOR_LUT
10438 #undef PIPE_CONF_QUIRK
10443 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
10444 const struct intel_crtc_state *pipe_config)
10446 if (pipe_config->has_pch_encoder) {
10447 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10448 &pipe_config->fdi_m_n);
10449 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
10452 * FDI already provided one idea for the dotclock.
10453 * Yell if the encoder disagrees.
10455 drm_WARN(&dev_priv->drm,
10456 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
10457 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10458 fdi_dotclock, dotclock);
10462 static void verify_wm_state(struct intel_crtc *crtc,
10463 struct intel_crtc_state *new_crtc_state)
10465 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10466 struct skl_hw_state {
10467 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
10468 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
10469 struct skl_pipe_wm wm;
10471 struct skl_pipe_wm *sw_wm;
10472 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
10473 u8 hw_enabled_slices;
10474 const enum pipe pipe = crtc->pipe;
10475 int plane, level, max_level = ilk_wm_max_level(dev_priv);
10477 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
10480 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
10484 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
10485 sw_wm = &new_crtc_state->wm.skl.optimal;
10487 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
10489 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
10491 if (INTEL_GEN(dev_priv) >= 11 &&
10492 hw_enabled_slices != dev_priv->dbuf.enabled_slices)
10493 drm_err(&dev_priv->drm,
10494 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
10495 dev_priv->dbuf.enabled_slices,
10496 hw_enabled_slices);
10499 for_each_universal_plane(dev_priv, pipe, plane) {
10500 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
10502 hw_plane_wm = &hw->wm.planes[plane];
10503 sw_plane_wm = &sw_wm->planes[plane];
10506 for (level = 0; level <= max_level; level++) {
10507 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
10508 &sw_plane_wm->wm[level]) ||
10509 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
10510 &sw_plane_wm->sagv_wm0)))
10513 drm_err(&dev_priv->drm,
10514 "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
10515 pipe_name(pipe), plane + 1, level,
10516 sw_plane_wm->wm[level].plane_en,
10517 sw_plane_wm->wm[level].plane_res_b,
10518 sw_plane_wm->wm[level].plane_res_l,
10519 hw_plane_wm->wm[level].plane_en,
10520 hw_plane_wm->wm[level].plane_res_b,
10521 hw_plane_wm->wm[level].plane_res_l);
10524 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
10525 &sw_plane_wm->trans_wm)) {
10526 drm_err(&dev_priv->drm,
10527 "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
10528 pipe_name(pipe), plane + 1,
10529 sw_plane_wm->trans_wm.plane_en,
10530 sw_plane_wm->trans_wm.plane_res_b,
10531 sw_plane_wm->trans_wm.plane_res_l,
10532 hw_plane_wm->trans_wm.plane_en,
10533 hw_plane_wm->trans_wm.plane_res_b,
10534 hw_plane_wm->trans_wm.plane_res_l);
10538 hw_ddb_entry = &hw->ddb_y[plane];
10539 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
10541 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
10542 drm_err(&dev_priv->drm,
10543 "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
10544 pipe_name(pipe), plane + 1,
10545 sw_ddb_entry->start, sw_ddb_entry->end,
10546 hw_ddb_entry->start, hw_ddb_entry->end);
10552 * If the cursor plane isn't active, we may not have updated it's ddb
10553 * allocation. In that case since the ddb allocation will be updated
10554 * once the plane becomes visible, we can skip this check
10557 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
10559 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
10560 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
10563 for (level = 0; level <= max_level; level++) {
10564 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
10565 &sw_plane_wm->wm[level]) ||
10566 (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
10567 &sw_plane_wm->sagv_wm0)))
10570 drm_err(&dev_priv->drm,
10571 "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
10572 pipe_name(pipe), level,
10573 sw_plane_wm->wm[level].plane_en,
10574 sw_plane_wm->wm[level].plane_res_b,
10575 sw_plane_wm->wm[level].plane_res_l,
10576 hw_plane_wm->wm[level].plane_en,
10577 hw_plane_wm->wm[level].plane_res_b,
10578 hw_plane_wm->wm[level].plane_res_l);
10581 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
10582 &sw_plane_wm->trans_wm)) {
10583 drm_err(&dev_priv->drm,
10584 "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
10586 sw_plane_wm->trans_wm.plane_en,
10587 sw_plane_wm->trans_wm.plane_res_b,
10588 sw_plane_wm->trans_wm.plane_res_l,
10589 hw_plane_wm->trans_wm.plane_en,
10590 hw_plane_wm->trans_wm.plane_res_b,
10591 hw_plane_wm->trans_wm.plane_res_l);
10595 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
10596 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
10598 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
10599 drm_err(&dev_priv->drm,
10600 "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
10602 sw_ddb_entry->start, sw_ddb_entry->end,
10603 hw_ddb_entry->start, hw_ddb_entry->end);
10611 verify_connector_state(struct intel_atomic_state *state,
10612 struct intel_crtc *crtc)
10614 struct drm_connector *connector;
10615 struct drm_connector_state *new_conn_state;
10618 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
10619 struct drm_encoder *encoder = connector->encoder;
10620 struct intel_crtc_state *crtc_state = NULL;
10622 if (new_conn_state->crtc != &crtc->base)
10626 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
10628 intel_connector_verify_state(crtc_state, new_conn_state);
10630 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
10631 "connector's atomic encoder doesn't match legacy encoder\n");
10636 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
10638 struct intel_encoder *encoder;
10639 struct drm_connector *connector;
10640 struct drm_connector_state *old_conn_state, *new_conn_state;
10643 for_each_intel_encoder(&dev_priv->drm, encoder) {
10644 bool enabled = false, found = false;
10647 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
10648 encoder->base.base.id,
10649 encoder->base.name);
10651 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
10652 new_conn_state, i) {
10653 if (old_conn_state->best_encoder == &encoder->base)
10656 if (new_conn_state->best_encoder != &encoder->base)
10658 found = enabled = true;
10660 I915_STATE_WARN(new_conn_state->crtc !=
10661 encoder->base.crtc,
10662 "connector's crtc doesn't match encoder crtc\n");
10668 I915_STATE_WARN(!!encoder->base.crtc != enabled,
10669 "encoder's enabled state mismatch "
10670 "(expected %i, found %i)\n",
10671 !!encoder->base.crtc, enabled);
10673 if (!encoder->base.crtc) {
10676 active = encoder->get_hw_state(encoder, &pipe);
10677 I915_STATE_WARN(active,
10678 "encoder detached but still enabled on pipe %c.\n",
10685 verify_crtc_state(struct intel_crtc *crtc,
10686 struct intel_crtc_state *old_crtc_state,
10687 struct intel_crtc_state *new_crtc_state)
10689 struct drm_device *dev = crtc->base.dev;
10690 struct drm_i915_private *dev_priv = to_i915(dev);
10691 struct intel_encoder *encoder;
10692 struct intel_crtc_state *pipe_config = old_crtc_state;
10693 struct drm_atomic_state *state = old_crtc_state->uapi.state;
10694 struct intel_crtc *master = crtc;
10696 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
10697 intel_crtc_free_hw_state(old_crtc_state);
10698 intel_crtc_state_reset(old_crtc_state, crtc);
10699 old_crtc_state->uapi.state = state;
10701 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
10704 pipe_config->hw.enable = new_crtc_state->hw.enable;
10706 intel_crtc_get_pipe_config(pipe_config);
10708 /* we keep both pipes enabled on 830 */
10709 if (IS_I830(dev_priv) && pipe_config->hw.active)
10710 pipe_config->hw.active = new_crtc_state->hw.active;
10712 I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
10713 "crtc active state doesn't match with hw state "
10714 "(expected %i, found %i)\n",
10715 new_crtc_state->hw.active, pipe_config->hw.active);
10717 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
10718 "transitional active state does not match atomic hw state "
10719 "(expected %i, found %i)\n",
10720 new_crtc_state->hw.active, crtc->active);
10722 if (new_crtc_state->bigjoiner_slave)
10723 master = new_crtc_state->bigjoiner_linked_crtc;
10725 for_each_encoder_on_crtc(dev, &master->base, encoder) {
10729 active = encoder->get_hw_state(encoder, &pipe);
10730 I915_STATE_WARN(active != new_crtc_state->hw.active,
10731 "[ENCODER:%i] active %i with crtc active %i\n",
10732 encoder->base.base.id, active,
10733 new_crtc_state->hw.active);
10735 I915_STATE_WARN(active && master->pipe != pipe,
10736 "Encoder connected to wrong pipe %c\n",
10740 intel_encoder_get_config(encoder, pipe_config);
10743 if (!new_crtc_state->hw.active)
10746 intel_pipe_config_sanity_check(dev_priv, pipe_config);
10748 if (!intel_pipe_config_compare(new_crtc_state,
10749 pipe_config, false)) {
10750 I915_STATE_WARN(1, "pipe state doesn't match!\n");
10751 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
10752 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
10757 intel_verify_planes(struct intel_atomic_state *state)
10759 struct intel_plane *plane;
10760 const struct intel_plane_state *plane_state;
10763 for_each_new_intel_plane_in_state(state, plane,
10765 assert_plane(plane, plane_state->planar_slave ||
10766 plane_state->uapi.visible);
10770 verify_single_dpll_state(struct drm_i915_private *dev_priv,
10771 struct intel_shared_dpll *pll,
10772 struct intel_crtc *crtc,
10773 struct intel_crtc_state *new_crtc_state)
10775 struct intel_dpll_hw_state dpll_hw_state;
10776 unsigned int crtc_mask;
10779 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10781 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
10783 active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
10785 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
10786 I915_STATE_WARN(!pll->on && pll->active_mask,
10787 "pll in active use but not on in sw tracking\n");
10788 I915_STATE_WARN(pll->on && !pll->active_mask,
10789 "pll is on but not used by any active crtc\n");
10790 I915_STATE_WARN(pll->on != active,
10791 "pll on state mismatch (expected %i, found %i)\n",
10796 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
10797 "more active pll users than references: %x vs %x\n",
10798 pll->active_mask, pll->state.crtc_mask);
10803 crtc_mask = drm_crtc_mask(&crtc->base);
10805 if (new_crtc_state->hw.active)
10806 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
10807 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
10808 pipe_name(crtc->pipe), pll->active_mask);
10810 I915_STATE_WARN(pll->active_mask & crtc_mask,
10811 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
10812 pipe_name(crtc->pipe), pll->active_mask);
10814 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
10815 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
10816 crtc_mask, pll->state.crtc_mask);
10818 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
10820 sizeof(dpll_hw_state)),
10821 "pll hw state mismatch\n");
10825 verify_shared_dpll_state(struct intel_crtc *crtc,
10826 struct intel_crtc_state *old_crtc_state,
10827 struct intel_crtc_state *new_crtc_state)
10829 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10831 if (new_crtc_state->shared_dpll)
10832 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
10834 if (old_crtc_state->shared_dpll &&
10835 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
10836 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
10837 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
10839 I915_STATE_WARN(pll->active_mask & crtc_mask,
10840 "pll active mismatch (didn't expect pipe %c in active mask)\n",
10841 pipe_name(crtc->pipe));
10842 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
10843 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
10844 pipe_name(crtc->pipe));
10849 intel_modeset_verify_crtc(struct intel_crtc *crtc,
10850 struct intel_atomic_state *state,
10851 struct intel_crtc_state *old_crtc_state,
10852 struct intel_crtc_state *new_crtc_state)
10854 if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
10857 verify_wm_state(crtc, new_crtc_state);
10858 verify_connector_state(state, crtc);
10859 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
10860 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
10864 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
10868 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
10869 verify_single_dpll_state(dev_priv,
10870 &dev_priv->dpll.shared_dplls[i],
10875 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
10876 struct intel_atomic_state *state)
10878 verify_encoder_state(dev_priv, state);
10879 verify_connector_state(state, NULL);
10880 verify_disabled_dpll_state(dev_priv);
10884 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
10886 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10887 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10888 struct drm_display_mode adjusted_mode =
10889 crtc_state->hw.adjusted_mode;
10891 if (crtc_state->vrr.enable) {
10892 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
10893 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
10894 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
10895 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
10898 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
10900 crtc->mode_flags = crtc_state->mode_flags;
10903 * The scanline counter increments at the leading edge of hsync.
10905 * On most platforms it starts counting from vtotal-1 on the
10906 * first active line. That means the scanline counter value is
10907 * always one less than what we would expect. Ie. just after
10908 * start of vblank, which also occurs at start of hsync (on the
10909 * last active line), the scanline counter will read vblank_start-1.
10911 * On gen2 the scanline counter starts counting from 1 instead
10912 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10913 * to keep the value positive), instead of adding one.
10915 * On HSW+ the behaviour of the scanline counter depends on the output
10916 * type. For DP ports it behaves like most other platforms, but on HDMI
10917 * there's an extra 1 line difference. So we need to add two instead of
10918 * one to the value.
10920 * On VLV/CHV DSI the scanline counter would appear to increment
10921 * approx. 1/3 of a scanline before start of vblank. Unfortunately
10922 * that means we can't tell whether we're in vblank or not while
10923 * we're on that particular line. We must still set scanline_offset
10924 * to 1 so that the vblank timestamps come out correct when we query
10925 * the scanline counter from within the vblank interrupt handler.
10926 * However if queried just before the start of vblank we'll get an
10927 * answer that's slightly in the future.
10929 if (IS_GEN(dev_priv, 2)) {
10932 vtotal = adjusted_mode.crtc_vtotal;
10933 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10936 crtc->scanline_offset = vtotal - 1;
10937 } else if (HAS_DDI(dev_priv) &&
10938 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
10939 crtc->scanline_offset = 2;
10941 crtc->scanline_offset = 1;
10945 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
10947 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10948 struct intel_crtc_state *new_crtc_state;
10949 struct intel_crtc *crtc;
10952 if (!dev_priv->display.crtc_compute_clock)
10955 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10956 if (!intel_crtc_needs_modeset(new_crtc_state))
10959 intel_release_shared_dplls(state, crtc);
10964 * This implements the workaround described in the "notes" section of the mode
10965 * set sequence documentation. When going from no pipes or single pipe to
10966 * multiple pipes, and planes are enabled after the pipe, we need to wait at
10967 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
10969 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
10971 struct intel_crtc_state *crtc_state;
10972 struct intel_crtc *crtc;
10973 struct intel_crtc_state *first_crtc_state = NULL;
10974 struct intel_crtc_state *other_crtc_state = NULL;
10975 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
10978 /* look at all crtc's that are going to be enabled in during modeset */
10979 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10980 if (!crtc_state->hw.active ||
10981 !intel_crtc_needs_modeset(crtc_state))
10984 if (first_crtc_state) {
10985 other_crtc_state = crtc_state;
10988 first_crtc_state = crtc_state;
10989 first_pipe = crtc->pipe;
10993 /* No workaround needed? */
10994 if (!first_crtc_state)
10997 /* w/a possibly needed, check how many crtc's are already enabled. */
10998 for_each_intel_crtc(state->base.dev, crtc) {
10999 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
11000 if (IS_ERR(crtc_state))
11001 return PTR_ERR(crtc_state);
11003 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
11005 if (!crtc_state->hw.active ||
11006 intel_crtc_needs_modeset(crtc_state))
11009 /* 2 or more enabled crtcs means no need for w/a */
11010 if (enabled_pipe != INVALID_PIPE)
11013 enabled_pipe = crtc->pipe;
11016 if (enabled_pipe != INVALID_PIPE)
11017 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
11018 else if (other_crtc_state)
11019 other_crtc_state->hsw_workaround_pipe = first_pipe;
11024 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
11027 const struct intel_crtc_state *crtc_state;
11028 struct intel_crtc *crtc;
11031 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11032 if (crtc_state->hw.active)
11033 active_pipes |= BIT(crtc->pipe);
11035 active_pipes &= ~BIT(crtc->pipe);
11038 return active_pipes;
11041 static int intel_modeset_checks(struct intel_atomic_state *state)
11043 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11045 state->modeset = true;
11047 if (IS_HASWELL(dev_priv))
11048 return hsw_mode_set_planes_workaround(state);
11054 * Handle calculation of various watermark data at the end of the atomic check
11055 * phase. The code here should be run after the per-crtc and per-plane 'check'
11056 * handlers to ensure that all derived state has been updated.
11058 static int calc_watermark_data(struct intel_atomic_state *state)
11060 struct drm_device *dev = state->base.dev;
11061 struct drm_i915_private *dev_priv = to_i915(dev);
11063 /* Is there platform-specific watermark information to calculate? */
11064 if (dev_priv->display.compute_global_watermarks)
11065 return dev_priv->display.compute_global_watermarks(state);
11070 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
11071 struct intel_crtc_state *new_crtc_state)
11073 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
11076 new_crtc_state->uapi.mode_changed = false;
11077 new_crtc_state->update_pipe = true;
11080 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
11081 struct intel_crtc_state *new_crtc_state)
11084 * If we're not doing the full modeset we want to
11085 * keep the current M/N values as they may be
11086 * sufficiently different to the computed values
11087 * to cause problems.
11089 * FIXME: should really copy more fuzzy state here
11091 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
11092 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
11093 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
11094 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
11097 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
11098 struct intel_crtc *crtc,
11101 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11102 struct intel_plane *plane;
11104 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
11105 struct intel_plane_state *plane_state;
11107 if ((plane_ids_mask & BIT(plane->id)) == 0)
11110 plane_state = intel_atomic_get_plane_state(state, plane);
11111 if (IS_ERR(plane_state))
11112 return PTR_ERR(plane_state);
11118 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
11119 struct intel_crtc *crtc)
11121 const struct intel_crtc_state *old_crtc_state =
11122 intel_atomic_get_old_crtc_state(state, crtc);
11123 const struct intel_crtc_state *new_crtc_state =
11124 intel_atomic_get_new_crtc_state(state, crtc);
11126 return intel_crtc_add_planes_to_state(state, crtc,
11127 old_crtc_state->enabled_planes |
11128 new_crtc_state->enabled_planes);
11131 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
11133 /* See {hsw,vlv,ivb}_plane_ratio() */
11134 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
11135 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11136 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
11139 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
11140 struct intel_crtc *crtc,
11141 struct intel_crtc *other)
11143 const struct intel_plane_state *plane_state;
11144 struct intel_plane *plane;
11148 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11149 if (plane->pipe == crtc->pipe)
11150 plane_ids |= BIT(plane->id);
11153 return intel_crtc_add_planes_to_state(state, other, plane_ids);
11156 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
11158 const struct intel_crtc_state *crtc_state;
11159 struct intel_crtc *crtc;
11162 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11165 if (!crtc_state->bigjoiner)
11168 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
11169 crtc_state->bigjoiner_linked_crtc);
11177 static int intel_atomic_check_planes(struct intel_atomic_state *state)
11179 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11180 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
11181 struct intel_plane_state *plane_state;
11182 struct intel_plane *plane;
11183 struct intel_crtc *crtc;
11186 ret = icl_add_linked_planes(state);
11190 ret = intel_bigjoiner_add_affected_planes(state);
11194 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11195 ret = intel_plane_atomic_check(state, plane);
11197 drm_dbg_atomic(&dev_priv->drm,
11198 "[PLANE:%d:%s] atomic driver check failed\n",
11199 plane->base.base.id, plane->base.name);
11204 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11205 new_crtc_state, i) {
11206 u8 old_active_planes, new_active_planes;
11208 ret = icl_check_nv12_planes(new_crtc_state);
11213 * On some platforms the number of active planes affects
11214 * the planes' minimum cdclk calculation. Add such planes
11215 * to the state before we compute the minimum cdclk.
11217 if (!active_planes_affects_min_cdclk(dev_priv))
11220 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
11221 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
11224 * Not only the number of planes, but if the plane configuration had
11225 * changed might already mean we need to recompute min CDCLK,
11226 * because different planes might consume different amount of Dbuf bandwidth
11227 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
11229 if (old_active_planes == new_active_planes)
11232 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
11240 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
11241 bool *need_cdclk_calc)
11243 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11244 const struct intel_cdclk_state *old_cdclk_state;
11245 const struct intel_cdclk_state *new_cdclk_state;
11246 struct intel_plane_state *plane_state;
11247 struct intel_bw_state *new_bw_state;
11248 struct intel_plane *plane;
11254 * active_planes bitmask has been updated, and potentially
11255 * affected planes are part of the state. We can now
11256 * compute the minimum cdclk for each plane.
11258 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11259 ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
11264 old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
11265 new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
11267 if (new_cdclk_state &&
11268 old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
11269 *need_cdclk_calc = true;
11271 ret = dev_priv->display.bw_calc_min_cdclk(state);
11275 new_bw_state = intel_atomic_get_new_bw_state(state);
11277 if (!new_cdclk_state || !new_bw_state)
11280 for_each_pipe(dev_priv, pipe) {
11281 min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
11284 * Currently do this change only if we need to increase
11286 if (new_bw_state->min_cdclk > min_cdclk)
11287 *need_cdclk_calc = true;
11293 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
11295 struct intel_crtc_state *crtc_state;
11296 struct intel_crtc *crtc;
11299 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11300 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
11303 ret = intel_crtc_atomic_check(state, crtc);
11305 drm_dbg_atomic(&i915->drm,
11306 "[CRTC:%d:%s] atomic driver check failed\n",
11307 crtc->base.base.id, crtc->base.name);
11315 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
11318 const struct intel_crtc_state *new_crtc_state;
11319 struct intel_crtc *crtc;
11322 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11323 if (new_crtc_state->hw.enable &&
11324 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
11325 intel_crtc_needs_modeset(new_crtc_state))
11332 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
11333 struct intel_crtc *crtc,
11334 struct intel_crtc_state *old_crtc_state,
11335 struct intel_crtc_state *new_crtc_state)
11337 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11338 struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
11339 struct intel_crtc *slave, *master;
11341 /* slave being enabled, is master is still claiming this crtc? */
11342 if (old_crtc_state->bigjoiner_slave) {
11344 master = old_crtc_state->bigjoiner_linked_crtc;
11345 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
11346 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
11350 if (!new_crtc_state->bigjoiner)
11353 if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
11354 DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
11355 "CRTC + 1 to be used, doesn't exist\n",
11356 crtc->base.base.id, crtc->base.name);
11360 slave = new_crtc_state->bigjoiner_linked_crtc =
11361 intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
11362 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
11364 if (IS_ERR(slave_crtc_state))
11365 return PTR_ERR(slave_crtc_state);
11367 /* master being enabled, slave was already configured? */
11368 if (slave_crtc_state->uapi.enable)
11371 DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
11372 slave->base.base.id, slave->base.name);
11374 return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
11377 DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
11378 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
11379 slave->base.base.id, slave->base.name,
11380 master->base.base.id, master->base.name);
11384 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
11385 struct intel_crtc_state *master_crtc_state)
11387 struct intel_crtc_state *slave_crtc_state =
11388 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
11390 slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
11391 slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
11392 slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
11393 intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
11397 * DOC: asynchronous flip implementation
11399 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
11400 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
11401 * Correspondingly, support is currently added for primary plane only.
11403 * Async flip can only change the plane surface address, so anything else
11404 * changing is rejected from the intel_atomic_check_async() function.
11405 * Once this check is cleared, flip done interrupt is enabled using
11406 * the intel_crtc_enable_flip_done() function.
11408 * As soon as the surface address register is written, flip done interrupt is
11409 * generated and the requested events are sent to the usersapce in the interrupt
11410 * handler itself. The timestamp and sequence sent during the flip done event
11411 * correspond to the last vblank and have no relation to the actual time when
11412 * the flip done event was sent.
11414 static int intel_atomic_check_async(struct intel_atomic_state *state)
11416 struct drm_i915_private *i915 = to_i915(state->base.dev);
11417 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
11418 const struct intel_plane_state *new_plane_state, *old_plane_state;
11419 struct intel_crtc *crtc;
11420 struct intel_plane *plane;
11423 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11424 new_crtc_state, i) {
11425 if (intel_crtc_needs_modeset(new_crtc_state)) {
11426 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
11430 if (!new_crtc_state->hw.active) {
11431 drm_dbg_kms(&i915->drm, "CRTC inactive\n");
11434 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
11435 drm_dbg_kms(&i915->drm,
11436 "Active planes cannot be changed during async flip\n");
11441 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
11442 new_plane_state, i) {
11444 * TODO: Async flip is only supported through the page flip IOCTL
11445 * as of now. So support currently added for primary plane only.
11446 * Support for other planes on platforms on which supports
11447 * this(vlv/chv and icl+) should be added when async flip is
11448 * enabled in the atomic IOCTL path.
11450 if (!plane->async_flip)
11454 * FIXME: This check is kept generic for all platforms.
11455 * Need to verify this for all gen9 and gen10 platforms to enable
11456 * this selectively if required.
11458 switch (new_plane_state->hw.fb->modifier) {
11459 case I915_FORMAT_MOD_X_TILED:
11460 case I915_FORMAT_MOD_Y_TILED:
11461 case I915_FORMAT_MOD_Yf_TILED:
11464 drm_dbg_kms(&i915->drm,
11465 "Linear memory/CCS does not support async flips\n");
11469 if (old_plane_state->color_plane[0].stride !=
11470 new_plane_state->color_plane[0].stride) {
11471 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
11475 if (old_plane_state->hw.fb->modifier !=
11476 new_plane_state->hw.fb->modifier) {
11477 drm_dbg_kms(&i915->drm,
11478 "Framebuffer modifiers cannot be changed in async flip\n");
11482 if (old_plane_state->hw.fb->format !=
11483 new_plane_state->hw.fb->format) {
11484 drm_dbg_kms(&i915->drm,
11485 "Framebuffer format cannot be changed in async flip\n");
11489 if (old_plane_state->hw.rotation !=
11490 new_plane_state->hw.rotation) {
11491 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
11495 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
11496 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
11497 drm_dbg_kms(&i915->drm,
11498 "Plane size/co-ordinates cannot be changed in async flip\n");
11502 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
11503 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
11507 if (old_plane_state->hw.pixel_blend_mode !=
11508 new_plane_state->hw.pixel_blend_mode) {
11509 drm_dbg_kms(&i915->drm,
11510 "Pixel blend mode cannot be changed in async flip\n");
11514 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
11515 drm_dbg_kms(&i915->drm,
11516 "Color encoding cannot be changed in async flip\n");
11520 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
11521 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
11529 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
11531 struct intel_crtc_state *crtc_state;
11532 struct intel_crtc *crtc;
11535 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11536 struct intel_crtc_state *linked_crtc_state;
11537 struct intel_crtc *linked_crtc;
11540 if (!crtc_state->bigjoiner)
11543 linked_crtc = crtc_state->bigjoiner_linked_crtc;
11544 linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
11545 if (IS_ERR(linked_crtc_state))
11546 return PTR_ERR(linked_crtc_state);
11548 if (!intel_crtc_needs_modeset(crtc_state))
11551 linked_crtc_state->uapi.mode_changed = true;
11553 ret = drm_atomic_add_affected_connectors(&state->base,
11554 &linked_crtc->base);
11558 ret = intel_atomic_add_affected_planes(state, linked_crtc);
11563 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11564 /* Kill old bigjoiner link, we may re-establish afterwards */
11565 if (intel_crtc_needs_modeset(crtc_state) &&
11566 crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
11567 kill_bigjoiner_slave(state, crtc_state);
11574 * intel_atomic_check - validate state object
11576 * @_state: state to validate
11578 static int intel_atomic_check(struct drm_device *dev,
11579 struct drm_atomic_state *_state)
11581 struct drm_i915_private *dev_priv = to_i915(dev);
11582 struct intel_atomic_state *state = to_intel_atomic_state(_state);
11583 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
11584 struct intel_crtc *crtc;
11586 bool any_ms = false;
11588 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11589 new_crtc_state, i) {
11590 if (new_crtc_state->inherited != old_crtc_state->inherited)
11591 new_crtc_state->uapi.mode_changed = true;
11594 intel_vrr_check_modeset(state);
11596 ret = drm_atomic_helper_check_modeset(dev, &state->base);
11600 ret = intel_bigjoiner_add_affected_crtcs(state);
11604 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11605 new_crtc_state, i) {
11606 if (!intel_crtc_needs_modeset(new_crtc_state)) {
11608 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
11613 if (!new_crtc_state->uapi.enable) {
11614 if (!new_crtc_state->bigjoiner_slave) {
11615 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
11621 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
11625 ret = intel_modeset_pipe_config(state, new_crtc_state);
11629 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
11635 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11636 new_crtc_state, i) {
11637 if (!intel_crtc_needs_modeset(new_crtc_state))
11640 ret = intel_modeset_pipe_config_late(new_crtc_state);
11644 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
11648 * Check if fastset is allowed by external dependencies like other
11649 * pipes and transcoders.
11651 * Right now it only forces a fullmodeset when the MST master
11652 * transcoder did not changed but the pipe of the master transcoder
11653 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
11654 * in case of port synced crtcs, if one of the synced crtcs
11655 * needs a full modeset, all other synced crtcs should be
11656 * forced a full modeset.
11658 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11659 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
11662 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
11663 enum transcoder master = new_crtc_state->mst_master_transcoder;
11665 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
11666 new_crtc_state->uapi.mode_changed = true;
11667 new_crtc_state->update_pipe = false;
11671 if (is_trans_port_sync_mode(new_crtc_state)) {
11672 u8 trans = new_crtc_state->sync_mode_slaves_mask;
11674 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
11675 trans |= BIT(new_crtc_state->master_transcoder);
11677 if (intel_cpu_transcoders_need_modeset(state, trans)) {
11678 new_crtc_state->uapi.mode_changed = true;
11679 new_crtc_state->update_pipe = false;
11683 if (new_crtc_state->bigjoiner) {
11684 struct intel_crtc_state *linked_crtc_state =
11685 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
11687 if (intel_crtc_needs_modeset(linked_crtc_state)) {
11688 new_crtc_state->uapi.mode_changed = true;
11689 new_crtc_state->update_pipe = false;
11694 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11695 new_crtc_state, i) {
11696 if (intel_crtc_needs_modeset(new_crtc_state)) {
11701 if (!new_crtc_state->update_pipe)
11704 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
11707 if (any_ms && !check_digital_port_conflicts(state)) {
11708 drm_dbg_kms(&dev_priv->drm,
11709 "rejecting conflicting digital port configuration\n");
11714 ret = drm_dp_mst_atomic_check(&state->base);
11718 ret = intel_atomic_check_planes(state);
11722 intel_fbc_choose_crtc(dev_priv, state);
11723 ret = calc_watermark_data(state);
11727 ret = intel_bw_atomic_check(state);
11731 ret = intel_atomic_check_cdclk(state, &any_ms);
11736 ret = intel_modeset_checks(state);
11740 ret = intel_modeset_calc_cdclk(state);
11744 intel_modeset_clear_plls(state);
11747 ret = intel_atomic_check_crtcs(state);
11751 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11752 new_crtc_state, i) {
11753 if (new_crtc_state->uapi.async_flip) {
11754 ret = intel_atomic_check_async(state);
11759 if (!intel_crtc_needs_modeset(new_crtc_state) &&
11760 !new_crtc_state->update_pipe)
11763 intel_dump_pipe_config(new_crtc_state, state,
11764 intel_crtc_needs_modeset(new_crtc_state) ?
11765 "[modeset]" : "[fastset]");
11771 if (ret == -EDEADLK)
11775 * FIXME would probably be nice to know which crtc specifically
11776 * caused the failure, in cases where we can pinpoint it.
11778 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11780 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
11785 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
11787 struct intel_crtc_state *crtc_state;
11788 struct intel_crtc *crtc;
11791 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
11795 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11796 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
11798 if (mode_changed || crtc_state->update_pipe ||
11799 crtc_state->uapi.color_mgmt_changed) {
11800 intel_dsb_prepare(crtc_state);
11807 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
11808 struct intel_crtc_state *crtc_state)
11810 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11812 if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
11813 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
11815 if (crtc_state->has_pch_encoder) {
11816 enum pipe pch_transcoder =
11817 intel_crtc_pch_transcoder(crtc);
11819 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
11823 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
11824 const struct intel_crtc_state *new_crtc_state)
11826 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
11827 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11830 * Update pipe size and adjust fitter if needed: the reason for this is
11831 * that in compute_mode_changes we check the native mode (not the pfit
11832 * mode) to see if we can flip rather than do a full mode set. In the
11833 * fastboot case, we'll flip, but if we don't update the pipesrc and
11834 * pfit state, we'll end up with a big fb scanned out into the wrong
11837 intel_set_pipe_src_size(new_crtc_state);
11839 /* on skylake this is done by detaching scalers */
11840 if (INTEL_GEN(dev_priv) >= 9) {
11841 skl_detach_scalers(new_crtc_state);
11843 if (new_crtc_state->pch_pfit.enabled)
11844 skl_pfit_enable(new_crtc_state);
11845 } else if (HAS_PCH_SPLIT(dev_priv)) {
11846 if (new_crtc_state->pch_pfit.enabled)
11847 ilk_pfit_enable(new_crtc_state);
11848 else if (old_crtc_state->pch_pfit.enabled)
11849 ilk_pfit_disable(old_crtc_state);
11853 * The register is supposedly single buffered so perhaps
11854 * not 100% correct to do this here. But SKL+ calculate
11855 * this based on the adjust pixel rate so pfit changes do
11856 * affect it and so it must be updated for fastsets.
11857 * HSW/BDW only really need this here for fastboot, after
11858 * that the value should not change without a full modeset.
11860 if (INTEL_GEN(dev_priv) >= 9 ||
11861 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
11862 hsw_set_linetime_wm(new_crtc_state);
11864 if (INTEL_GEN(dev_priv) >= 11)
11865 icl_set_pipe_chicken(crtc);
11868 static void commit_pipe_config(struct intel_atomic_state *state,
11869 struct intel_crtc *crtc)
11871 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11872 const struct intel_crtc_state *old_crtc_state =
11873 intel_atomic_get_old_crtc_state(state, crtc);
11874 const struct intel_crtc_state *new_crtc_state =
11875 intel_atomic_get_new_crtc_state(state, crtc);
11876 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
11879 * During modesets pipe configuration was programmed as the
11880 * CRTC was enabled.
11883 if (new_crtc_state->uapi.color_mgmt_changed ||
11884 new_crtc_state->update_pipe)
11885 intel_color_commit(new_crtc_state);
11887 if (INTEL_GEN(dev_priv) >= 9)
11888 skl_detach_scalers(new_crtc_state);
11890 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
11891 bdw_set_pipemisc(new_crtc_state);
11893 if (new_crtc_state->update_pipe)
11894 intel_pipe_fastset(old_crtc_state, new_crtc_state);
11896 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
11899 if (dev_priv->display.atomic_update_watermarks)
11900 dev_priv->display.atomic_update_watermarks(state, crtc);
11903 static void intel_enable_crtc(struct intel_atomic_state *state,
11904 struct intel_crtc *crtc)
11906 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11907 const struct intel_crtc_state *new_crtc_state =
11908 intel_atomic_get_new_crtc_state(state, crtc);
11910 if (!intel_crtc_needs_modeset(new_crtc_state))
11913 intel_crtc_update_active_timings(new_crtc_state);
11915 dev_priv->display.crtc_enable(state, crtc);
11917 if (new_crtc_state->bigjoiner_slave)
11920 /* vblanks work again, re-enable pipe CRC. */
11921 intel_crtc_enable_pipe_crc(crtc);
11924 static void intel_update_crtc(struct intel_atomic_state *state,
11925 struct intel_crtc *crtc)
11927 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11928 const struct intel_crtc_state *old_crtc_state =
11929 intel_atomic_get_old_crtc_state(state, crtc);
11930 struct intel_crtc_state *new_crtc_state =
11931 intel_atomic_get_new_crtc_state(state, crtc);
11932 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
11935 if (new_crtc_state->preload_luts &&
11936 (new_crtc_state->uapi.color_mgmt_changed ||
11937 new_crtc_state->update_pipe))
11938 intel_color_load_luts(new_crtc_state);
11940 intel_pre_plane_update(state, crtc);
11942 if (new_crtc_state->update_pipe)
11943 intel_encoders_update_pipe(state, crtc);
11946 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
11947 intel_fbc_disable(crtc);
11949 intel_fbc_enable(state, crtc);
11951 /* Perform vblank evasion around commit operation */
11952 intel_pipe_update_start(new_crtc_state);
11954 commit_pipe_config(state, crtc);
11956 if (INTEL_GEN(dev_priv) >= 9)
11957 skl_update_planes_on_crtc(state, crtc);
11959 i9xx_update_planes_on_crtc(state, crtc);
11961 intel_pipe_update_end(new_crtc_state);
11964 * We usually enable FIFO underrun interrupts as part of the
11965 * CRTC enable sequence during modesets. But when we inherit a
11966 * valid pipe configuration from the BIOS we need to take care
11967 * of enabling them on the CRTC's first fastset.
11969 if (new_crtc_state->update_pipe && !modeset &&
11970 old_crtc_state->inherited)
11971 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
11974 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
11975 struct intel_crtc_state *old_crtc_state,
11976 struct intel_crtc_state *new_crtc_state,
11977 struct intel_crtc *crtc)
11979 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11981 drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
11983 intel_crtc_disable_planes(state, crtc);
11986 * We still need special handling for disabling bigjoiner master
11987 * and slaves since for slave we do not have encoder or plls
11988 * so we dont need to disable those.
11990 if (old_crtc_state->bigjoiner) {
11991 intel_crtc_disable_planes(state,
11992 old_crtc_state->bigjoiner_linked_crtc);
11993 old_crtc_state->bigjoiner_linked_crtc->active = false;
11997 * We need to disable pipe CRC before disabling the pipe,
11998 * or we race against vblank off.
12000 intel_crtc_disable_pipe_crc(crtc);
12002 dev_priv->display.crtc_disable(state, crtc);
12003 crtc->active = false;
12004 intel_fbc_disable(crtc);
12005 intel_disable_shared_dpll(old_crtc_state);
12007 /* FIXME unify this for all platforms */
12008 if (!new_crtc_state->hw.active &&
12009 !HAS_GMCH(dev_priv) &&
12010 dev_priv->display.initial_watermarks)
12011 dev_priv->display.initial_watermarks(state, crtc);
12014 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
12016 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
12017 struct intel_crtc *crtc;
12021 /* Only disable port sync and MST slaves */
12022 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
12023 new_crtc_state, i) {
12024 if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
12027 if (!old_crtc_state->hw.active)
12030 /* In case of Transcoder port Sync master slave CRTCs can be
12031 * assigned in any order and we need to make sure that
12032 * slave CRTCs are disabled first and then master CRTC since
12033 * Slave vblanks are masked till Master Vblanks.
12035 if (!is_trans_port_sync_slave(old_crtc_state) &&
12036 !intel_dp_mst_is_slave_trans(old_crtc_state))
12039 intel_pre_plane_update(state, crtc);
12040 intel_old_crtc_state_disables(state, old_crtc_state,
12041 new_crtc_state, crtc);
12042 handled |= BIT(crtc->pipe);
12045 /* Disable everything else left on */
12046 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
12047 new_crtc_state, i) {
12048 if (!intel_crtc_needs_modeset(new_crtc_state) ||
12049 (handled & BIT(crtc->pipe)) ||
12050 old_crtc_state->bigjoiner_slave)
12053 intel_pre_plane_update(state, crtc);
12054 if (old_crtc_state->bigjoiner) {
12055 struct intel_crtc *slave =
12056 old_crtc_state->bigjoiner_linked_crtc;
12058 intel_pre_plane_update(state, slave);
12061 if (old_crtc_state->hw.active)
12062 intel_old_crtc_state_disables(state, old_crtc_state,
12063 new_crtc_state, crtc);
12067 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
12069 struct intel_crtc_state *new_crtc_state;
12070 struct intel_crtc *crtc;
12073 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12074 if (!new_crtc_state->hw.active)
12077 intel_enable_crtc(state, crtc);
12078 intel_update_crtc(state, crtc);
12082 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
12084 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
12085 struct intel_crtc *crtc;
12086 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
12087 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
12088 u8 update_pipes = 0, modeset_pipes = 0;
12091 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12092 enum pipe pipe = crtc->pipe;
12094 if (!new_crtc_state->hw.active)
12097 /* ignore allocations for crtc's that have been turned off. */
12098 if (!intel_crtc_needs_modeset(new_crtc_state)) {
12099 entries[pipe] = old_crtc_state->wm.skl.ddb;
12100 update_pipes |= BIT(pipe);
12102 modeset_pipes |= BIT(pipe);
12107 * Whenever the number of active pipes changes, we need to make sure we
12108 * update the pipes in the right order so that their ddb allocations
12109 * never overlap with each other between CRTC updates. Otherwise we'll
12110 * cause pipe underruns and other bad stuff.
12112 * So first lets enable all pipes that do not need a fullmodeset as
12113 * those don't have any external dependency.
12115 while (update_pipes) {
12116 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
12117 new_crtc_state, i) {
12118 enum pipe pipe = crtc->pipe;
12120 if ((update_pipes & BIT(pipe)) == 0)
12123 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
12124 entries, I915_MAX_PIPES, pipe))
12127 entries[pipe] = new_crtc_state->wm.skl.ddb;
12128 update_pipes &= ~BIT(pipe);
12130 intel_update_crtc(state, crtc);
12133 * If this is an already active pipe, it's DDB changed,
12134 * and this isn't the last pipe that needs updating
12135 * then we need to wait for a vblank to pass for the
12136 * new ddb allocation to take effect.
12138 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
12139 &old_crtc_state->wm.skl.ddb) &&
12140 (update_pipes | modeset_pipes))
12141 intel_wait_for_vblank(dev_priv, pipe);
12145 update_pipes = modeset_pipes;
12148 * Enable all pipes that needs a modeset and do not depends on other
12151 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12152 enum pipe pipe = crtc->pipe;
12154 if ((modeset_pipes & BIT(pipe)) == 0)
12157 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
12158 is_trans_port_sync_master(new_crtc_state) ||
12159 (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
12162 modeset_pipes &= ~BIT(pipe);
12164 intel_enable_crtc(state, crtc);
12168 * Then we enable all remaining pipes that depend on other
12169 * pipes: MST slaves and port sync masters, big joiner master
12171 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12172 enum pipe pipe = crtc->pipe;
12174 if ((modeset_pipes & BIT(pipe)) == 0)
12177 modeset_pipes &= ~BIT(pipe);
12179 intel_enable_crtc(state, crtc);
12183 * Finally we do the plane updates/etc. for all pipes that got enabled.
12185 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12186 enum pipe pipe = crtc->pipe;
12188 if ((update_pipes & BIT(pipe)) == 0)
12191 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
12192 entries, I915_MAX_PIPES, pipe));
12194 entries[pipe] = new_crtc_state->wm.skl.ddb;
12195 update_pipes &= ~BIT(pipe);
12197 intel_update_crtc(state, crtc);
12200 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
12201 drm_WARN_ON(&dev_priv->drm, update_pipes);
12204 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12206 struct intel_atomic_state *state, *next;
12207 struct llist_node *freed;
12209 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12210 llist_for_each_entry_safe(state, next, freed, freed)
12211 drm_atomic_state_put(&state->base);
12214 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12216 struct drm_i915_private *dev_priv =
12217 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12219 intel_atomic_helper_free_state(dev_priv);
12222 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
12224 struct wait_queue_entry wait_fence, wait_reset;
12225 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
12227 init_wait_entry(&wait_fence, 0);
12228 init_wait_entry(&wait_reset, 0);
12230 prepare_to_wait(&intel_state->commit_ready.wait,
12231 &wait_fence, TASK_UNINTERRUPTIBLE);
12232 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
12233 I915_RESET_MODESET),
12234 &wait_reset, TASK_UNINTERRUPTIBLE);
12237 if (i915_sw_fence_done(&intel_state->commit_ready) ||
12238 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
12243 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
12244 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
12245 I915_RESET_MODESET),
12249 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
12251 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
12252 struct intel_crtc *crtc;
12255 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
12257 intel_dsb_cleanup(old_crtc_state);
12260 static void intel_atomic_cleanup_work(struct work_struct *work)
12262 struct intel_atomic_state *state =
12263 container_of(work, struct intel_atomic_state, base.commit_work);
12264 struct drm_i915_private *i915 = to_i915(state->base.dev);
12266 intel_cleanup_dsbs(state);
12267 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
12268 drm_atomic_helper_commit_cleanup_done(&state->base);
12269 drm_atomic_state_put(&state->base);
12271 intel_atomic_helper_free_state(i915);
12274 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
12276 struct drm_i915_private *i915 = to_i915(state->base.dev);
12277 struct intel_plane *plane;
12278 struct intel_plane_state *plane_state;
12281 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12282 struct drm_framebuffer *fb = plane_state->hw.fb;
12286 fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
12290 * The layout of the fast clear color value expected by HW
12291 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
12292 * - 4 x 4 bytes per-channel value
12293 * (in surface type specific float/int format provided by the fb user)
12294 * - 8 bytes native color value used by the display
12295 * (converted/written by GPU during a fast clear operation using the
12296 * above per-channel values)
12298 * The commit's FB prepare hook already ensured that FB obj is pinned and the
12299 * caller made sure that the object is synced wrt. the related color clear value
12302 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
12303 fb->offsets[2] + 16,
12304 &plane_state->ccval,
12305 sizeof(plane_state->ccval));
12306 /* The above could only fail if the FB obj has an unexpected backing store type. */
12307 drm_WARN_ON(&i915->drm, ret);
12311 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
12313 struct drm_device *dev = state->base.dev;
12314 struct drm_i915_private *dev_priv = to_i915(dev);
12315 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
12316 struct intel_crtc *crtc;
12317 u64 put_domains[I915_MAX_PIPES] = {};
12318 intel_wakeref_t wakeref = 0;
12321 intel_atomic_commit_fence_wait(state);
12323 drm_atomic_helper_wait_for_dependencies(&state->base);
12325 if (state->modeset)
12326 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
12328 intel_atomic_prepare_plane_clear_colors(state);
12330 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
12331 new_crtc_state, i) {
12332 if (intel_crtc_needs_modeset(new_crtc_state) ||
12333 new_crtc_state->update_pipe) {
12335 put_domains[crtc->pipe] =
12336 modeset_get_crtc_power_domains(new_crtc_state);
12340 intel_commit_modeset_disables(state);
12342 /* FIXME: Eventually get rid of our crtc->config pointer */
12343 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
12344 crtc->config = new_crtc_state;
12346 if (state->modeset) {
12347 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
12349 intel_set_cdclk_pre_plane_update(state);
12351 intel_modeset_verify_disabled(dev_priv, state);
12354 intel_sagv_pre_plane_update(state);
12356 /* Complete the events for pipes that have now been disabled */
12357 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12358 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
12360 /* Complete events for now disable pipes here. */
12361 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
12362 spin_lock_irq(&dev->event_lock);
12363 drm_crtc_send_vblank_event(&crtc->base,
12364 new_crtc_state->uapi.event);
12365 spin_unlock_irq(&dev->event_lock);
12367 new_crtc_state->uapi.event = NULL;
12371 if (state->modeset)
12372 intel_encoders_update_prepare(state);
12374 intel_dbuf_pre_plane_update(state);
12376 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12377 if (new_crtc_state->uapi.async_flip)
12378 intel_crtc_enable_flip_done(state, crtc);
12381 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
12382 dev_priv->display.commit_modeset_enables(state);
12384 if (state->modeset) {
12385 intel_encoders_update_complete(state);
12387 intel_set_cdclk_post_plane_update(state);
12390 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
12391 * already, but still need the state for the delayed optimization. To
12393 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
12394 * - schedule that vblank worker _before_ calling hw_done
12395 * - at the start of commit_tail, cancel it _synchrously
12396 * - switch over to the vblank wait helper in the core after that since
12397 * we don't need out special handling any more.
12399 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
12401 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12402 if (new_crtc_state->uapi.async_flip)
12403 intel_crtc_disable_flip_done(state, crtc);
12405 if (new_crtc_state->hw.active &&
12406 !intel_crtc_needs_modeset(new_crtc_state) &&
12407 !new_crtc_state->preload_luts &&
12408 (new_crtc_state->uapi.color_mgmt_changed ||
12409 new_crtc_state->update_pipe))
12410 intel_color_load_luts(new_crtc_state);
12414 * Now that the vblank has passed, we can go ahead and program the
12415 * optimal watermarks on platforms that need two-step watermark
12418 * TODO: Move this (and other cleanup) to an async worker eventually.
12420 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
12421 new_crtc_state, i) {
12423 * Gen2 reports pipe underruns whenever all planes are disabled.
12424 * So re-enable underrun reporting after some planes get enabled.
12426 * We do this before .optimize_watermarks() so that we have a
12427 * chance of catching underruns with the intermediate watermarks
12428 * vs. the new plane configuration.
12430 if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
12431 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
12433 if (dev_priv->display.optimize_watermarks)
12434 dev_priv->display.optimize_watermarks(state, crtc);
12437 intel_dbuf_post_plane_update(state);
12439 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12440 intel_post_plane_update(state, crtc);
12442 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
12444 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
12447 * DSB cleanup is done in cleanup_work aligning with framebuffer
12448 * cleanup. So copy and reset the dsb structure to sync with
12449 * commit_done and later do dsb cleanup in cleanup_work.
12451 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
12454 /* Underruns don't always raise interrupts, so check manually */
12455 intel_check_cpu_fifo_underruns(dev_priv);
12456 intel_check_pch_fifo_underruns(dev_priv);
12458 if (state->modeset)
12459 intel_verify_planes(state);
12461 intel_sagv_post_plane_update(state);
12463 drm_atomic_helper_commit_hw_done(&state->base);
12465 if (state->modeset) {
12466 /* As one of the primary mmio accessors, KMS has a high
12467 * likelihood of triggering bugs in unclaimed access. After we
12468 * finish modesetting, see if an error has been flagged, and if
12469 * so enable debugging for the next modeset - and hope we catch
12472 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
12473 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
12475 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
12478 * Defer the cleanup of the old state to a separate worker to not
12479 * impede the current task (userspace for blocking modesets) that
12480 * are executed inline. For out-of-line asynchronous modesets/flips,
12481 * deferring to a new worker seems overkill, but we would place a
12482 * schedule point (cond_resched()) here anyway to keep latencies
12485 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
12486 queue_work(system_highpri_wq, &state->base.commit_work);
12489 static void intel_atomic_commit_work(struct work_struct *work)
12491 struct intel_atomic_state *state =
12492 container_of(work, struct intel_atomic_state, base.commit_work);
12494 intel_atomic_commit_tail(state);
12497 static int __i915_sw_fence_call
12498 intel_atomic_commit_ready(struct i915_sw_fence *fence,
12499 enum i915_sw_fence_notify notify)
12501 struct intel_atomic_state *state =
12502 container_of(fence, struct intel_atomic_state, commit_ready);
12505 case FENCE_COMPLETE:
12506 /* we do blocking waits in the worker, nothing to do here */
12510 struct intel_atomic_helper *helper =
12511 &to_i915(state->base.dev)->atomic_helper;
12513 if (llist_add(&state->freed, &helper->free_list))
12514 schedule_work(&helper->free_work);
12519 return NOTIFY_DONE;
12522 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
12524 struct intel_plane_state *old_plane_state, *new_plane_state;
12525 struct intel_plane *plane;
12528 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
12529 new_plane_state, i)
12530 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
12531 to_intel_frontbuffer(new_plane_state->hw.fb),
12532 plane->frontbuffer_bit);
12535 static int intel_atomic_commit(struct drm_device *dev,
12536 struct drm_atomic_state *_state,
12539 struct intel_atomic_state *state = to_intel_atomic_state(_state);
12540 struct drm_i915_private *dev_priv = to_i915(dev);
12543 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
12545 drm_atomic_state_get(&state->base);
12546 i915_sw_fence_init(&state->commit_ready,
12547 intel_atomic_commit_ready);
12550 * The intel_legacy_cursor_update() fast path takes care
12551 * of avoiding the vblank waits for simple cursor
12552 * movement and flips. For cursor on/off and size changes,
12553 * we want to perform the vblank waits so that watermark
12554 * updates happen during the correct frames. Gen9+ have
12555 * double buffered watermarks and so shouldn't need this.
12557 * Unset state->legacy_cursor_update before the call to
12558 * drm_atomic_helper_setup_commit() because otherwise
12559 * drm_atomic_helper_wait_for_flip_done() is a noop and
12560 * we get FIFO underruns because we didn't wait
12563 * FIXME doing watermarks and fb cleanup from a vblank worker
12564 * (assuming we had any) would solve these problems.
12566 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
12567 struct intel_crtc_state *new_crtc_state;
12568 struct intel_crtc *crtc;
12571 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
12572 if (new_crtc_state->wm.need_postvbl_update ||
12573 new_crtc_state->update_wm_post)
12574 state->base.legacy_cursor_update = false;
12577 ret = intel_atomic_prepare_commit(state);
12579 drm_dbg_atomic(&dev_priv->drm,
12580 "Preparing state failed with %i\n", ret);
12581 i915_sw_fence_commit(&state->commit_ready);
12582 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
12586 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
12588 ret = drm_atomic_helper_swap_state(&state->base, true);
12590 intel_atomic_swap_global_state(state);
12593 struct intel_crtc_state *new_crtc_state;
12594 struct intel_crtc *crtc;
12597 i915_sw_fence_commit(&state->commit_ready);
12599 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
12600 intel_dsb_cleanup(new_crtc_state);
12602 drm_atomic_helper_cleanup_planes(dev, &state->base);
12603 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
12606 intel_shared_dpll_swap_state(state);
12607 intel_atomic_track_fbs(state);
12609 drm_atomic_state_get(&state->base);
12610 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
12612 i915_sw_fence_commit(&state->commit_ready);
12613 if (nonblock && state->modeset) {
12614 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
12615 } else if (nonblock) {
12616 queue_work(dev_priv->flip_wq, &state->base.commit_work);
12618 if (state->modeset)
12619 flush_workqueue(dev_priv->modeset_wq);
12620 intel_atomic_commit_tail(state);
12626 struct wait_rps_boost {
12627 struct wait_queue_entry wait;
12629 struct drm_crtc *crtc;
12630 struct i915_request *request;
12633 static int do_rps_boost(struct wait_queue_entry *_wait,
12634 unsigned mode, int sync, void *key)
12636 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
12637 struct i915_request *rq = wait->request;
12640 * If we missed the vblank, but the request is already running it
12641 * is reasonable to assume that it will complete before the next
12642 * vblank without our intervention, so leave RPS alone.
12644 if (!i915_request_started(rq))
12645 intel_rps_boost(rq);
12646 i915_request_put(rq);
12648 drm_crtc_vblank_put(wait->crtc);
12650 list_del(&wait->wait.entry);
12655 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
12656 struct dma_fence *fence)
12658 struct wait_rps_boost *wait;
12660 if (!dma_fence_is_i915(fence))
12663 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
12666 if (drm_crtc_vblank_get(crtc))
12669 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
12671 drm_crtc_vblank_put(crtc);
12675 wait->request = to_request(dma_fence_get(fence));
12678 wait->wait.func = do_rps_boost;
12679 wait->wait.flags = 0;
12681 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
12684 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
12686 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
12687 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
12688 struct drm_framebuffer *fb = plane_state->hw.fb;
12689 struct i915_vma *vma;
12691 if (plane->id == PLANE_CURSOR &&
12692 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
12693 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12694 const int align = intel_cursor_alignment(dev_priv);
12697 err = i915_gem_object_attach_phys(obj, align);
12702 vma = intel_pin_and_fence_fb_obj(fb,
12703 &plane_state->view,
12704 intel_plane_uses_fence(plane_state),
12705 &plane_state->flags);
12707 return PTR_ERR(vma);
12709 plane_state->vma = vma;
12714 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
12716 struct i915_vma *vma;
12718 vma = fetch_and_zero(&old_plane_state->vma);
12720 intel_unpin_fb_vma(vma, old_plane_state->flags);
12723 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
12725 struct i915_sched_attr attr = {
12726 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
12729 i915_gem_object_wait_priority(obj, 0, &attr);
12733 * intel_prepare_plane_fb - Prepare fb for usage on plane
12734 * @_plane: drm plane to prepare for
12735 * @_new_plane_state: the plane state being prepared
12737 * Prepares a framebuffer for usage on a display plane. Generally this
12738 * involves pinning the underlying object and updating the frontbuffer tracking
12739 * bits. Some older platforms need special physical address handling for
12742 * Returns 0 on success, negative error code on failure.
12745 intel_prepare_plane_fb(struct drm_plane *_plane,
12746 struct drm_plane_state *_new_plane_state)
12748 struct intel_plane *plane = to_intel_plane(_plane);
12749 struct intel_plane_state *new_plane_state =
12750 to_intel_plane_state(_new_plane_state);
12751 struct intel_atomic_state *state =
12752 to_intel_atomic_state(new_plane_state->uapi.state);
12753 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
12754 const struct intel_plane_state *old_plane_state =
12755 intel_atomic_get_old_plane_state(state, plane);
12756 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
12757 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
12761 const struct intel_crtc_state *crtc_state =
12762 intel_atomic_get_new_crtc_state(state,
12763 to_intel_crtc(old_plane_state->hw.crtc));
12765 /* Big Hammer, we also need to ensure that any pending
12766 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
12767 * current scanout is retired before unpinning the old
12768 * framebuffer. Note that we rely on userspace rendering
12769 * into the buffer attached to the pipe they are waiting
12770 * on. If not, userspace generates a GPU hang with IPEHR
12771 * point to the MI_WAIT_FOR_EVENT.
12773 * This should only fail upon a hung GPU, in which case we
12774 * can safely continue.
12776 if (intel_crtc_needs_modeset(crtc_state)) {
12777 ret = i915_sw_fence_await_reservation(&state->commit_ready,
12778 old_obj->base.resv, NULL,
12786 if (new_plane_state->uapi.fence) { /* explicit fencing */
12787 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
12788 new_plane_state->uapi.fence,
12789 i915_fence_timeout(dev_priv),
12798 ret = i915_gem_object_pin_pages(obj);
12802 ret = intel_plane_pin_fb(new_plane_state);
12804 i915_gem_object_unpin_pages(obj);
12808 fb_obj_bump_render_priority(obj);
12809 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
12811 if (!new_plane_state->uapi.fence) { /* implicit fencing */
12812 struct dma_fence *fence;
12814 ret = i915_sw_fence_await_reservation(&state->commit_ready,
12815 obj->base.resv, NULL,
12817 i915_fence_timeout(dev_priv),
12822 fence = dma_resv_get_excl_rcu(obj->base.resv);
12824 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
12826 dma_fence_put(fence);
12829 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
12830 new_plane_state->uapi.fence);
12834 * We declare pageflips to be interactive and so merit a small bias
12835 * towards upclocking to deliver the frame on time. By only changing
12836 * the RPS thresholds to sample more regularly and aim for higher
12837 * clocks we can hopefully deliver low power workloads (like kodi)
12838 * that are not quite steady state without resorting to forcing
12839 * maximum clocks following a vblank miss (see do_rps_boost()).
12841 if (!state->rps_interactive) {
12842 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
12843 state->rps_interactive = true;
12849 intel_plane_unpin_fb(new_plane_state);
12855 * intel_cleanup_plane_fb - Cleans up an fb after plane use
12856 * @plane: drm plane to clean up for
12857 * @_old_plane_state: the state from the previous modeset
12859 * Cleans up a framebuffer that has just been removed from a plane.
12862 intel_cleanup_plane_fb(struct drm_plane *plane,
12863 struct drm_plane_state *_old_plane_state)
12865 struct intel_plane_state *old_plane_state =
12866 to_intel_plane_state(_old_plane_state);
12867 struct intel_atomic_state *state =
12868 to_intel_atomic_state(old_plane_state->uapi.state);
12869 struct drm_i915_private *dev_priv = to_i915(plane->dev);
12870 struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
12875 if (state->rps_interactive) {
12876 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
12877 state->rps_interactive = false;
12880 /* Should only be called after a successful intel_prepare_plane_fb()! */
12881 intel_plane_unpin_fb(old_plane_state);
12885 * intel_plane_destroy - destroy a plane
12886 * @plane: plane to destroy
12888 * Common destruction function for all types of planes (primary, cursor,
12891 void intel_plane_destroy(struct drm_plane *plane)
12893 drm_plane_cleanup(plane);
12894 kfree(to_intel_plane(plane));
12897 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
12899 struct intel_plane *plane;
12901 for_each_intel_plane(&dev_priv->drm, plane) {
12902 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
12905 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
12910 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
12911 struct drm_file *file)
12913 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
12914 struct drm_crtc *drmmode_crtc;
12915 struct intel_crtc *crtc;
12917 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
12921 crtc = to_intel_crtc(drmmode_crtc);
12922 pipe_from_crtc_id->pipe = crtc->pipe;
12927 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
12929 struct drm_device *dev = encoder->base.dev;
12930 struct intel_encoder *source_encoder;
12931 u32 possible_clones = 0;
12933 for_each_intel_encoder(dev, source_encoder) {
12934 if (encoders_cloneable(encoder, source_encoder))
12935 possible_clones |= drm_encoder_mask(&source_encoder->base);
12938 return possible_clones;
12941 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
12943 struct drm_device *dev = encoder->base.dev;
12944 struct intel_crtc *crtc;
12945 u32 possible_crtcs = 0;
12947 for_each_intel_crtc(dev, crtc) {
12948 if (encoder->pipe_mask & BIT(crtc->pipe))
12949 possible_crtcs |= drm_crtc_mask(&crtc->base);
12952 return possible_crtcs;
12955 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
12957 if (!IS_MOBILE(dev_priv))
12960 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
12963 if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
12969 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
12971 if (INTEL_GEN(dev_priv) >= 9)
12974 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
12977 if (HAS_PCH_LPT_H(dev_priv) &&
12978 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
12981 /* DDI E can't be used if DDI A requires 4 lanes */
12982 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
12985 if (!dev_priv->vbt.int_crt_support)
12991 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
12993 struct intel_encoder *encoder;
12994 bool dpd_is_edp = false;
12996 intel_pps_unlock_regs_wa(dev_priv);
12998 if (!HAS_DISPLAY(dev_priv))
13001 if (IS_ALDERLAKE_S(dev_priv)) {
13002 intel_ddi_init(dev_priv, PORT_A);
13003 intel_ddi_init(dev_priv, PORT_TC1);
13004 intel_ddi_init(dev_priv, PORT_TC2);
13005 intel_ddi_init(dev_priv, PORT_TC3);
13006 intel_ddi_init(dev_priv, PORT_TC4);
13007 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
13008 intel_ddi_init(dev_priv, PORT_A);
13009 intel_ddi_init(dev_priv, PORT_B);
13010 intel_ddi_init(dev_priv, PORT_TC1);
13011 intel_ddi_init(dev_priv, PORT_TC2);
13012 } else if (INTEL_GEN(dev_priv) >= 12) {
13013 intel_ddi_init(dev_priv, PORT_A);
13014 intel_ddi_init(dev_priv, PORT_B);
13015 intel_ddi_init(dev_priv, PORT_TC1);
13016 intel_ddi_init(dev_priv, PORT_TC2);
13017 intel_ddi_init(dev_priv, PORT_TC3);
13018 intel_ddi_init(dev_priv, PORT_TC4);
13019 intel_ddi_init(dev_priv, PORT_TC5);
13020 intel_ddi_init(dev_priv, PORT_TC6);
13021 icl_dsi_init(dev_priv);
13022 } else if (IS_JSL_EHL(dev_priv)) {
13023 intel_ddi_init(dev_priv, PORT_A);
13024 intel_ddi_init(dev_priv, PORT_B);
13025 intel_ddi_init(dev_priv, PORT_C);
13026 intel_ddi_init(dev_priv, PORT_D);
13027 icl_dsi_init(dev_priv);
13028 } else if (IS_GEN(dev_priv, 11)) {
13029 intel_ddi_init(dev_priv, PORT_A);
13030 intel_ddi_init(dev_priv, PORT_B);
13031 intel_ddi_init(dev_priv, PORT_C);
13032 intel_ddi_init(dev_priv, PORT_D);
13033 intel_ddi_init(dev_priv, PORT_E);
13035 * On some ICL SKUs port F is not present. No strap bits for
13036 * this, so rely on VBT.
13037 * Work around broken VBTs on SKUs known to have no port F.
13039 if (IS_ICL_WITH_PORT_F(dev_priv) &&
13040 intel_bios_is_port_present(dev_priv, PORT_F))
13041 intel_ddi_init(dev_priv, PORT_F);
13043 icl_dsi_init(dev_priv);
13044 } else if (IS_GEN9_LP(dev_priv)) {
13046 * FIXME: Broxton doesn't support port detection via the
13047 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
13048 * detect the ports.
13050 intel_ddi_init(dev_priv, PORT_A);
13051 intel_ddi_init(dev_priv, PORT_B);
13052 intel_ddi_init(dev_priv, PORT_C);
13054 vlv_dsi_init(dev_priv);
13055 } else if (HAS_DDI(dev_priv)) {
13058 if (intel_ddi_crt_present(dev_priv))
13059 intel_crt_init(dev_priv);
13062 * Haswell uses DDI functions to detect digital outputs.
13063 * On SKL pre-D0 the strap isn't connected, so we assume
13066 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
13067 /* WaIgnoreDDIAStrap: skl */
13068 if (found || IS_GEN9_BC(dev_priv))
13069 intel_ddi_init(dev_priv, PORT_A);
13071 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
13073 found = intel_de_read(dev_priv, SFUSE_STRAP);
13075 if (found & SFUSE_STRAP_DDIB_DETECTED)
13076 intel_ddi_init(dev_priv, PORT_B);
13077 if (found & SFUSE_STRAP_DDIC_DETECTED)
13078 intel_ddi_init(dev_priv, PORT_C);
13079 if (found & SFUSE_STRAP_DDID_DETECTED)
13080 intel_ddi_init(dev_priv, PORT_D);
13081 if (found & SFUSE_STRAP_DDIF_DETECTED)
13082 intel_ddi_init(dev_priv, PORT_F);
13084 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
13086 if (IS_GEN9_BC(dev_priv) &&
13087 intel_bios_is_port_present(dev_priv, PORT_E))
13088 intel_ddi_init(dev_priv, PORT_E);
13090 } else if (HAS_PCH_SPLIT(dev_priv)) {
13094 * intel_edp_init_connector() depends on this completing first,
13095 * to prevent the registration of both eDP and LVDS and the
13096 * incorrect sharing of the PPS.
13098 intel_lvds_init(dev_priv);
13099 intel_crt_init(dev_priv);
13101 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
13103 if (ilk_has_edp_a(dev_priv))
13104 intel_dp_init(dev_priv, DP_A, PORT_A);
13106 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
13107 /* PCH SDVOB multiplex with HDMIB */
13108 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
13110 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
13111 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
13112 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
13115 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
13116 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
13118 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
13119 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
13121 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
13122 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
13124 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
13125 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
13126 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
13127 bool has_edp, has_port;
13129 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
13130 intel_crt_init(dev_priv);
13133 * The DP_DETECTED bit is the latched state of the DDC
13134 * SDA pin at boot. However since eDP doesn't require DDC
13135 * (no way to plug in a DP->HDMI dongle) the DDC pins for
13136 * eDP ports may have been muxed to an alternate function.
13137 * Thus we can't rely on the DP_DETECTED bit alone to detect
13138 * eDP ports. Consult the VBT as well as DP_DETECTED to
13139 * detect eDP ports.
13141 * Sadly the straps seem to be missing sometimes even for HDMI
13142 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
13143 * and VBT for the presence of the port. Additionally we can't
13144 * trust the port type the VBT declares as we've seen at least
13145 * HDMI ports that the VBT claim are DP or eDP.
13147 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
13148 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
13149 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
13150 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
13151 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
13152 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
13154 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
13155 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
13156 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
13157 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
13158 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
13159 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
13161 if (IS_CHERRYVIEW(dev_priv)) {
13163 * eDP not supported on port D,
13164 * so no need to worry about it
13166 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
13167 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
13168 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
13169 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
13170 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
13173 vlv_dsi_init(dev_priv);
13174 } else if (IS_PINEVIEW(dev_priv)) {
13175 intel_lvds_init(dev_priv);
13176 intel_crt_init(dev_priv);
13177 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
13178 bool found = false;
13180 if (IS_MOBILE(dev_priv))
13181 intel_lvds_init(dev_priv);
13183 intel_crt_init(dev_priv);
13185 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
13186 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
13187 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
13188 if (!found && IS_G4X(dev_priv)) {
13189 drm_dbg_kms(&dev_priv->drm,
13190 "probing HDMI on SDVOB\n");
13191 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
13194 if (!found && IS_G4X(dev_priv))
13195 intel_dp_init(dev_priv, DP_B, PORT_B);
13198 /* Before G4X SDVOC doesn't have its own detect register */
13200 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
13201 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
13202 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
13205 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
13207 if (IS_G4X(dev_priv)) {
13208 drm_dbg_kms(&dev_priv->drm,
13209 "probing HDMI on SDVOC\n");
13210 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
13212 if (IS_G4X(dev_priv))
13213 intel_dp_init(dev_priv, DP_C, PORT_C);
13216 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
13217 intel_dp_init(dev_priv, DP_D, PORT_D);
13219 if (SUPPORTS_TV(dev_priv))
13220 intel_tv_init(dev_priv);
13221 } else if (IS_GEN(dev_priv, 2)) {
13222 if (IS_I85X(dev_priv))
13223 intel_lvds_init(dev_priv);
13225 intel_crt_init(dev_priv);
13226 intel_dvo_init(dev_priv);
13229 for_each_intel_encoder(&dev_priv->drm, encoder) {
13230 encoder->base.possible_crtcs =
13231 intel_encoder_possible_crtcs(encoder);
13232 encoder->base.possible_clones =
13233 intel_encoder_possible_clones(encoder);
13236 intel_init_pch_refclk(dev_priv);
13238 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
13241 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
13243 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
13245 drm_framebuffer_cleanup(fb);
13246 intel_frontbuffer_put(intel_fb->frontbuffer);
13251 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
13252 struct drm_file *file,
13253 unsigned int *handle)
13255 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13256 struct drm_i915_private *i915 = to_i915(obj->base.dev);
13258 if (obj->userptr.mm) {
13259 drm_dbg(&i915->drm,
13260 "attempting to use a userptr for a framebuffer, denied\n");
13264 return drm_gem_handle_create(file, &obj->base, handle);
13267 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
13268 struct drm_file *file,
13269 unsigned flags, unsigned color,
13270 struct drm_clip_rect *clips,
13271 unsigned num_clips)
13273 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13275 i915_gem_object_flush_if_display(obj);
13276 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
13281 static const struct drm_framebuffer_funcs intel_fb_funcs = {
13282 .destroy = intel_user_framebuffer_destroy,
13283 .create_handle = intel_user_framebuffer_create_handle,
13284 .dirty = intel_user_framebuffer_dirty,
13287 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
13288 struct drm_i915_gem_object *obj,
13289 struct drm_mode_fb_cmd2 *mode_cmd)
13291 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
13292 struct drm_framebuffer *fb = &intel_fb->base;
13294 unsigned int tiling, stride;
13298 intel_fb->frontbuffer = intel_frontbuffer_get(obj);
13299 if (!intel_fb->frontbuffer)
13302 i915_gem_object_lock(obj, NULL);
13303 tiling = i915_gem_object_get_tiling(obj);
13304 stride = i915_gem_object_get_stride(obj);
13305 i915_gem_object_unlock(obj);
13307 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
13309 * If there's a fence, enforce that
13310 * the fb modifier and tiling mode match.
13312 if (tiling != I915_TILING_NONE &&
13313 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
13314 drm_dbg_kms(&dev_priv->drm,
13315 "tiling_mode doesn't match fb modifier\n");
13319 if (tiling == I915_TILING_X) {
13320 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
13321 } else if (tiling == I915_TILING_Y) {
13322 drm_dbg_kms(&dev_priv->drm,
13323 "No Y tiling for legacy addfb\n");
13328 if (!drm_any_plane_has_format(&dev_priv->drm,
13329 mode_cmd->pixel_format,
13330 mode_cmd->modifier[0])) {
13331 struct drm_format_name_buf format_name;
13333 drm_dbg_kms(&dev_priv->drm,
13334 "unsupported pixel format %s / modifier 0x%llx\n",
13335 drm_get_format_name(mode_cmd->pixel_format,
13337 mode_cmd->modifier[0]);
13342 * gen2/3 display engine uses the fence if present,
13343 * so the tiling mode must match the fb modifier exactly.
13345 if (INTEL_GEN(dev_priv) < 4 &&
13346 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
13347 drm_dbg_kms(&dev_priv->drm,
13348 "tiling_mode must match fb modifier exactly on gen2/3\n");
13352 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
13353 mode_cmd->modifier[0]);
13354 if (mode_cmd->pitches[0] > max_stride) {
13355 drm_dbg_kms(&dev_priv->drm,
13356 "%s pitch (%u) must be at most %d\n",
13357 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
13358 "tiled" : "linear",
13359 mode_cmd->pitches[0], max_stride);
13364 * If there's a fence, enforce that
13365 * the fb pitch and fence stride match.
13367 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
13368 drm_dbg_kms(&dev_priv->drm,
13369 "pitch (%d) must match tiling stride (%d)\n",
13370 mode_cmd->pitches[0], stride);
13374 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
13375 if (mode_cmd->offsets[0] != 0) {
13376 drm_dbg_kms(&dev_priv->drm,
13377 "plane 0 offset (0x%08x) must be 0\n",
13378 mode_cmd->offsets[0]);
13382 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
13384 for (i = 0; i < fb->format->num_planes; i++) {
13385 u32 stride_alignment;
13387 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
13388 drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
13393 stride_alignment = intel_fb_stride_alignment(fb, i);
13394 if (fb->pitches[i] & (stride_alignment - 1)) {
13395 drm_dbg_kms(&dev_priv->drm,
13396 "plane %d pitch (%d) must be at least %u byte aligned\n",
13397 i, fb->pitches[i], stride_alignment);
13401 if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
13402 int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
13404 if (fb->pitches[i] != ccs_aux_stride) {
13405 drm_dbg_kms(&dev_priv->drm,
13406 "ccs aux plane %d pitch (%d) must be %d\n",
13408 fb->pitches[i], ccs_aux_stride);
13413 fb->obj[i] = &obj->base;
13416 ret = intel_fill_fb_info(dev_priv, fb);
13420 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
13422 drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
13429 intel_frontbuffer_put(intel_fb->frontbuffer);
13433 static struct drm_framebuffer *
13434 intel_user_framebuffer_create(struct drm_device *dev,
13435 struct drm_file *filp,
13436 const struct drm_mode_fb_cmd2 *user_mode_cmd)
13438 struct drm_framebuffer *fb;
13439 struct drm_i915_gem_object *obj;
13440 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
13442 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
13444 return ERR_PTR(-ENOENT);
13446 fb = intel_framebuffer_create(obj, &mode_cmd);
13447 i915_gem_object_put(obj);
13452 static enum drm_mode_status
13453 intel_mode_valid(struct drm_device *dev,
13454 const struct drm_display_mode *mode)
13456 struct drm_i915_private *dev_priv = to_i915(dev);
13457 int hdisplay_max, htotal_max;
13458 int vdisplay_max, vtotal_max;
13461 * Can't reject DBLSCAN here because Xorg ddxen can add piles
13462 * of DBLSCAN modes to the output's mode list when they detect
13463 * the scaling mode property on the connector. And they don't
13464 * ask the kernel to validate those modes in any way until
13465 * modeset time at which point the client gets a protocol error.
13466 * So in order to not upset those clients we silently ignore the
13467 * DBLSCAN flag on such connectors. For other connectors we will
13468 * reject modes with the DBLSCAN flag in encoder->compute_config().
13469 * And we always reject DBLSCAN modes in connector->mode_valid()
13470 * as we never want such modes on the connector's mode list.
13473 if (mode->vscan > 1)
13474 return MODE_NO_VSCAN;
13476 if (mode->flags & DRM_MODE_FLAG_HSKEW)
13477 return MODE_H_ILLEGAL;
13479 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
13480 DRM_MODE_FLAG_NCSYNC |
13481 DRM_MODE_FLAG_PCSYNC))
13484 if (mode->flags & (DRM_MODE_FLAG_BCAST |
13485 DRM_MODE_FLAG_PIXMUX |
13486 DRM_MODE_FLAG_CLKDIV2))
13489 /* Transcoder timing limits */
13490 if (INTEL_GEN(dev_priv) >= 11) {
13491 hdisplay_max = 16384;
13492 vdisplay_max = 8192;
13493 htotal_max = 16384;
13495 } else if (INTEL_GEN(dev_priv) >= 9 ||
13496 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
13497 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
13498 vdisplay_max = 4096;
13501 } else if (INTEL_GEN(dev_priv) >= 3) {
13502 hdisplay_max = 4096;
13503 vdisplay_max = 4096;
13507 hdisplay_max = 2048;
13508 vdisplay_max = 2048;
13513 if (mode->hdisplay > hdisplay_max ||
13514 mode->hsync_start > htotal_max ||
13515 mode->hsync_end > htotal_max ||
13516 mode->htotal > htotal_max)
13517 return MODE_H_ILLEGAL;
13519 if (mode->vdisplay > vdisplay_max ||
13520 mode->vsync_start > vtotal_max ||
13521 mode->vsync_end > vtotal_max ||
13522 mode->vtotal > vtotal_max)
13523 return MODE_V_ILLEGAL;
13525 if (INTEL_GEN(dev_priv) >= 5) {
13526 if (mode->hdisplay < 64 ||
13527 mode->htotal - mode->hdisplay < 32)
13528 return MODE_H_ILLEGAL;
13530 if (mode->vtotal - mode->vdisplay < 5)
13531 return MODE_V_ILLEGAL;
13533 if (mode->htotal - mode->hdisplay < 32)
13534 return MODE_H_ILLEGAL;
13536 if (mode->vtotal - mode->vdisplay < 3)
13537 return MODE_V_ILLEGAL;
13543 enum drm_mode_status
13544 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
13545 const struct drm_display_mode *mode,
13548 int plane_width_max, plane_height_max;
13551 * intel_mode_valid() should be
13552 * sufficient on older platforms.
13554 if (INTEL_GEN(dev_priv) < 9)
13558 * Most people will probably want a fullscreen
13559 * plane so let's not advertize modes that are
13560 * too big for that.
13562 if (INTEL_GEN(dev_priv) >= 11) {
13563 plane_width_max = 5120 << bigjoiner;
13564 plane_height_max = 4320;
13566 plane_width_max = 5120;
13567 plane_height_max = 4096;
13570 if (mode->hdisplay > plane_width_max)
13571 return MODE_H_ILLEGAL;
13573 if (mode->vdisplay > plane_height_max)
13574 return MODE_V_ILLEGAL;
13579 static const struct drm_mode_config_funcs intel_mode_funcs = {
13580 .fb_create = intel_user_framebuffer_create,
13581 .get_format_info = intel_get_format_info,
13582 .output_poll_changed = intel_fbdev_output_poll_changed,
13583 .mode_valid = intel_mode_valid,
13584 .atomic_check = intel_atomic_check,
13585 .atomic_commit = intel_atomic_commit,
13586 .atomic_state_alloc = intel_atomic_state_alloc,
13587 .atomic_state_clear = intel_atomic_state_clear,
13588 .atomic_state_free = intel_atomic_state_free,
13592 * intel_init_display_hooks - initialize the display modesetting hooks
13593 * @dev_priv: device private
13595 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
13597 intel_init_cdclk_hooks(dev_priv);
13599 intel_dpll_init_clock_hook(dev_priv);
13601 if (INTEL_GEN(dev_priv) >= 9) {
13602 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
13603 dev_priv->display.crtc_enable = hsw_crtc_enable;
13604 dev_priv->display.crtc_disable = hsw_crtc_disable;
13605 } else if (HAS_DDI(dev_priv)) {
13606 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
13607 dev_priv->display.crtc_enable = hsw_crtc_enable;
13608 dev_priv->display.crtc_disable = hsw_crtc_disable;
13609 } else if (HAS_PCH_SPLIT(dev_priv)) {
13610 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
13611 dev_priv->display.crtc_enable = ilk_crtc_enable;
13612 dev_priv->display.crtc_disable = ilk_crtc_disable;
13613 } else if (IS_CHERRYVIEW(dev_priv) ||
13614 IS_VALLEYVIEW(dev_priv)) {
13615 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
13616 dev_priv->display.crtc_enable = valleyview_crtc_enable;
13617 dev_priv->display.crtc_disable = i9xx_crtc_disable;
13619 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
13620 dev_priv->display.crtc_enable = i9xx_crtc_enable;
13621 dev_priv->display.crtc_disable = i9xx_crtc_disable;
13624 intel_fdi_init_hook(dev_priv);
13626 if (INTEL_GEN(dev_priv) >= 9) {
13627 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
13628 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
13630 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
13631 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
13636 void intel_modeset_init_hw(struct drm_i915_private *i915)
13638 struct intel_cdclk_state *cdclk_state =
13639 to_intel_cdclk_state(i915->cdclk.obj.state);
13641 intel_update_cdclk(i915);
13642 intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
13643 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
13646 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
13648 struct drm_plane *plane;
13649 struct intel_crtc *crtc;
13651 for_each_intel_crtc(state->dev, crtc) {
13652 struct intel_crtc_state *crtc_state;
13654 crtc_state = intel_atomic_get_crtc_state(state, crtc);
13655 if (IS_ERR(crtc_state))
13656 return PTR_ERR(crtc_state);
13658 if (crtc_state->hw.active) {
13660 * Preserve the inherited flag to avoid
13661 * taking the full modeset path.
13663 crtc_state->inherited = true;
13667 drm_for_each_plane(plane, state->dev) {
13668 struct drm_plane_state *plane_state;
13670 plane_state = drm_atomic_get_plane_state(state, plane);
13671 if (IS_ERR(plane_state))
13672 return PTR_ERR(plane_state);
13679 * Calculate what we think the watermarks should be for the state we've read
13680 * out of the hardware and then immediately program those watermarks so that
13681 * we ensure the hardware settings match our internal state.
13683 * We can calculate what we think WM's should be by creating a duplicate of the
13684 * current state (which was constructed during hardware readout) and running it
13685 * through the atomic check code to calculate new watermark values in the
13688 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
13690 struct drm_atomic_state *state;
13691 struct intel_atomic_state *intel_state;
13692 struct intel_crtc *crtc;
13693 struct intel_crtc_state *crtc_state;
13694 struct drm_modeset_acquire_ctx ctx;
13698 /* Only supported on platforms that use atomic watermark design */
13699 if (!dev_priv->display.optimize_watermarks)
13702 state = drm_atomic_state_alloc(&dev_priv->drm);
13703 if (drm_WARN_ON(&dev_priv->drm, !state))
13706 intel_state = to_intel_atomic_state(state);
13708 drm_modeset_acquire_init(&ctx, 0);
13711 state->acquire_ctx = &ctx;
13714 * Hardware readout is the only time we don't want to calculate
13715 * intermediate watermarks (since we don't trust the current
13718 if (!HAS_GMCH(dev_priv))
13719 intel_state->skip_intermediate_wm = true;
13721 ret = sanitize_watermarks_add_affected(state);
13725 ret = intel_atomic_check(&dev_priv->drm, state);
13729 /* Write calculated watermark values back */
13730 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
13731 crtc_state->wm.need_postvbl_update = true;
13732 dev_priv->display.optimize_watermarks(intel_state, crtc);
13734 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
13738 if (ret == -EDEADLK) {
13739 drm_atomic_state_clear(state);
13740 drm_modeset_backoff(&ctx);
13745 * If we fail here, it means that the hardware appears to be
13746 * programmed in a way that shouldn't be possible, given our
13747 * understanding of watermark requirements. This might mean a
13748 * mistake in the hardware readout code or a mistake in the
13749 * watermark calculations for a given platform. Raise a WARN
13750 * so that this is noticeable.
13752 * If this actually happens, we'll have to just leave the
13753 * BIOS-programmed watermarks untouched and hope for the best.
13755 drm_WARN(&dev_priv->drm, ret,
13756 "Could not determine valid watermarks for inherited state\n");
13758 drm_atomic_state_put(state);
13760 drm_modeset_drop_locks(&ctx);
13761 drm_modeset_acquire_fini(&ctx);
13764 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
13766 if (IS_GEN(dev_priv, 5)) {
13768 intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
13770 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
13771 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
13772 dev_priv->fdi_pll_freq = 270000;
13777 drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
13780 static int intel_initial_commit(struct drm_device *dev)
13782 struct drm_atomic_state *state = NULL;
13783 struct drm_modeset_acquire_ctx ctx;
13784 struct intel_crtc *crtc;
13787 state = drm_atomic_state_alloc(dev);
13791 drm_modeset_acquire_init(&ctx, 0);
13794 state->acquire_ctx = &ctx;
13796 for_each_intel_crtc(dev, crtc) {
13797 struct intel_crtc_state *crtc_state =
13798 intel_atomic_get_crtc_state(state, crtc);
13800 if (IS_ERR(crtc_state)) {
13801 ret = PTR_ERR(crtc_state);
13805 if (crtc_state->hw.active) {
13806 struct intel_encoder *encoder;
13809 * We've not yet detected sink capabilities
13810 * (audio,infoframes,etc.) and thus we don't want to
13811 * force a full state recomputation yet. We want that to
13812 * happen only for the first real commit from userspace.
13813 * So preserve the inherited flag for the time being.
13815 crtc_state->inherited = true;
13817 ret = drm_atomic_add_affected_planes(state, &crtc->base);
13822 * FIXME hack to force a LUT update to avoid the
13823 * plane update forcing the pipe gamma on without
13824 * having a proper LUT loaded. Remove once we
13825 * have readout for pipe gamma enable.
13827 crtc_state->uapi.color_mgmt_changed = true;
13829 for_each_intel_encoder_mask(dev, encoder,
13830 crtc_state->uapi.encoder_mask) {
13831 if (encoder->initial_fastset_check &&
13832 !encoder->initial_fastset_check(encoder, crtc_state)) {
13833 ret = drm_atomic_add_affected_connectors(state,
13842 ret = drm_atomic_commit(state);
13845 if (ret == -EDEADLK) {
13846 drm_atomic_state_clear(state);
13847 drm_modeset_backoff(&ctx);
13851 drm_atomic_state_put(state);
13853 drm_modeset_drop_locks(&ctx);
13854 drm_modeset_acquire_fini(&ctx);
13859 static void intel_mode_config_init(struct drm_i915_private *i915)
13861 struct drm_mode_config *mode_config = &i915->drm.mode_config;
13863 drm_mode_config_init(&i915->drm);
13864 INIT_LIST_HEAD(&i915->global_obj_list);
13866 mode_config->min_width = 0;
13867 mode_config->min_height = 0;
13869 mode_config->preferred_depth = 24;
13870 mode_config->prefer_shadow = 1;
13872 mode_config->allow_fb_modifiers = true;
13874 mode_config->funcs = &intel_mode_funcs;
13876 mode_config->async_page_flip = has_async_flips(i915);
13879 * Maximum framebuffer dimensions, chosen to match
13880 * the maximum render engine surface size on gen4+.
13882 if (INTEL_GEN(i915) >= 7) {
13883 mode_config->max_width = 16384;
13884 mode_config->max_height = 16384;
13885 } else if (INTEL_GEN(i915) >= 4) {
13886 mode_config->max_width = 8192;
13887 mode_config->max_height = 8192;
13888 } else if (IS_GEN(i915, 3)) {
13889 mode_config->max_width = 4096;
13890 mode_config->max_height = 4096;
13892 mode_config->max_width = 2048;
13893 mode_config->max_height = 2048;
13896 if (IS_I845G(i915) || IS_I865G(i915)) {
13897 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
13898 mode_config->cursor_height = 1023;
13899 } else if (IS_I830(i915) || IS_I85X(i915) ||
13900 IS_I915G(i915) || IS_I915GM(i915)) {
13901 mode_config->cursor_width = 64;
13902 mode_config->cursor_height = 64;
13904 mode_config->cursor_width = 256;
13905 mode_config->cursor_height = 256;
13909 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
13911 intel_atomic_global_obj_cleanup(i915);
13912 drm_mode_config_cleanup(&i915->drm);
13915 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
13917 if (plane_config->fb) {
13918 struct drm_framebuffer *fb = &plane_config->fb->base;
13920 /* We may only have the stub and not a full framebuffer */
13921 if (drm_framebuffer_read_refcount(fb))
13922 drm_framebuffer_put(fb);
13927 if (plane_config->vma)
13928 i915_vma_put(plane_config->vma);
13931 /* part #1: call before irq install */
13932 int intel_modeset_init_noirq(struct drm_i915_private *i915)
13936 if (i915_inject_probe_failure(i915))
13939 if (HAS_DISPLAY(i915)) {
13940 ret = drm_vblank_init(&i915->drm,
13941 INTEL_NUM_PIPES(i915));
13946 intel_bios_init(i915);
13948 ret = intel_vga_register(i915);
13952 /* FIXME: completely on the wrong abstraction layer */
13953 intel_power_domains_init_hw(i915, false);
13955 intel_csr_ucode_init(i915);
13957 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
13958 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
13959 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
13961 i915->framestart_delay = 1; /* 1-4 */
13963 intel_mode_config_init(i915);
13965 ret = intel_cdclk_init(i915);
13967 goto cleanup_vga_client_pw_domain_csr;
13969 ret = intel_dbuf_init(i915);
13971 goto cleanup_vga_client_pw_domain_csr;
13973 ret = intel_bw_init(i915);
13975 goto cleanup_vga_client_pw_domain_csr;
13977 init_llist_head(&i915->atomic_helper.free_list);
13978 INIT_WORK(&i915->atomic_helper.free_work,
13979 intel_atomic_helper_free_state_worker);
13981 intel_init_quirks(i915);
13983 intel_fbc_init(i915);
13987 cleanup_vga_client_pw_domain_csr:
13988 intel_csr_ucode_fini(i915);
13989 intel_power_domains_driver_remove(i915);
13990 intel_vga_unregister(i915);
13992 intel_bios_driver_remove(i915);
13997 /* part #2: call after irq install, but before gem init */
13998 int intel_modeset_init_nogem(struct drm_i915_private *i915)
14000 struct drm_device *dev = &i915->drm;
14002 struct intel_crtc *crtc;
14005 intel_init_pm(i915);
14007 intel_panel_sanitize_ssc(i915);
14009 intel_pps_setup(i915);
14011 intel_gmbus_setup(i915);
14013 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
14014 INTEL_NUM_PIPES(i915),
14015 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
14017 if (HAS_DISPLAY(i915)) {
14018 for_each_pipe(i915, pipe) {
14019 ret = intel_crtc_init(i915, pipe);
14021 intel_mode_config_cleanup(i915);
14027 intel_plane_possible_crtcs_init(i915);
14028 intel_shared_dpll_init(dev);
14029 intel_update_fdi_pll_freq(i915);
14031 intel_update_czclk(i915);
14032 intel_modeset_init_hw(i915);
14034 intel_hdcp_component_init(i915);
14036 if (i915->max_cdclk_freq == 0)
14037 intel_update_max_cdclk(i915);
14040 * If the platform has HTI, we need to find out whether it has reserved
14041 * any display resources before we create our display outputs.
14043 if (INTEL_INFO(i915)->display.has_hti)
14044 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
14046 /* Just disable it once at startup */
14047 intel_vga_disable(i915);
14048 intel_setup_outputs(i915);
14050 drm_modeset_lock_all(dev);
14051 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
14052 drm_modeset_unlock_all(dev);
14054 for_each_intel_crtc(dev, crtc) {
14055 struct intel_initial_plane_config plane_config = {};
14057 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
14061 * Note that reserving the BIOS fb up front prevents us
14062 * from stuffing other stolen allocations like the ring
14063 * on top. This prevents some ugliness at boot time, and
14064 * can even allow for smooth boot transitions if the BIOS
14065 * fb is large enough for the active pipe configuration.
14067 i915->display.get_initial_plane_config(crtc, &plane_config);
14070 * If the fb is shared between multiple heads, we'll
14071 * just get the first one.
14073 intel_find_initial_plane_obj(crtc, &plane_config);
14075 plane_config_fini(&plane_config);
14079 * Make sure hardware watermarks really match the state we read out.
14080 * Note that we need to do this after reconstructing the BIOS fb's
14081 * since the watermark calculation done here will use pstate->fb.
14083 if (!HAS_GMCH(i915))
14084 sanitize_watermarks(i915);
14089 /* part #3: call after gem init */
14090 int intel_modeset_init(struct drm_i915_private *i915)
14094 if (!HAS_DISPLAY(i915))
14098 * Force all active planes to recompute their states. So that on
14099 * mode_setcrtc after probe, all the intel_plane_state variables
14100 * are already calculated and there is no assert_plane warnings
14103 ret = intel_initial_commit(&i915->drm);
14105 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
14107 intel_overlay_setup(i915);
14109 ret = intel_fbdev_init(&i915->drm);
14113 /* Only enable hotplug handling once the fbdev is fully set up. */
14114 intel_hpd_init(i915);
14115 intel_hpd_poll_disable(i915);
14117 intel_init_ipc(i915);
14122 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
14124 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14125 /* 640x480@60Hz, ~25175 kHz */
14126 struct dpll clock = {
14136 drm_WARN_ON(&dev_priv->drm,
14137 i9xx_calc_dpll_params(48000, &clock) != 25154);
14139 drm_dbg_kms(&dev_priv->drm,
14140 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
14141 pipe_name(pipe), clock.vco, clock.dot);
14143 fp = i9xx_dpll_compute_fp(&clock);
14144 dpll = DPLL_DVO_2X_MODE |
14145 DPLL_VGA_MODE_DIS |
14146 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
14147 PLL_P2_DIVIDE_BY_4 |
14148 PLL_REF_INPUT_DREFCLK |
14151 intel_de_write(dev_priv, FP0(pipe), fp);
14152 intel_de_write(dev_priv, FP1(pipe), fp);
14154 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
14155 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
14156 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
14157 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
14158 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
14159 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
14160 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
14163 * Apparently we need to have VGA mode enabled prior to changing
14164 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
14165 * dividers, even though the register value does change.
14167 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
14168 intel_de_write(dev_priv, DPLL(pipe), dpll);
14170 /* Wait for the clocks to stabilize. */
14171 intel_de_posting_read(dev_priv, DPLL(pipe));
14174 /* The pixel multiplier can only be updated once the
14175 * DPLL is enabled and the clocks are stable.
14177 * So write it again.
14179 intel_de_write(dev_priv, DPLL(pipe), dpll);
14181 /* We do this three times for luck */
14182 for (i = 0; i < 3 ; i++) {
14183 intel_de_write(dev_priv, DPLL(pipe), dpll);
14184 intel_de_posting_read(dev_priv, DPLL(pipe));
14185 udelay(150); /* wait for warmup */
14188 intel_de_write(dev_priv, PIPECONF(pipe),
14189 PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
14190 intel_de_posting_read(dev_priv, PIPECONF(pipe));
14192 intel_wait_for_pipe_scanline_moving(crtc);
14195 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
14197 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14199 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
14202 drm_WARN_ON(&dev_priv->drm,
14203 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
14204 DISPLAY_PLANE_ENABLE);
14205 drm_WARN_ON(&dev_priv->drm,
14206 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
14207 DISPLAY_PLANE_ENABLE);
14208 drm_WARN_ON(&dev_priv->drm,
14209 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
14210 DISPLAY_PLANE_ENABLE);
14211 drm_WARN_ON(&dev_priv->drm,
14212 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
14213 drm_WARN_ON(&dev_priv->drm,
14214 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
14216 intel_de_write(dev_priv, PIPECONF(pipe), 0);
14217 intel_de_posting_read(dev_priv, PIPECONF(pipe));
14219 intel_wait_for_pipe_scanline_stopped(crtc);
14221 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
14222 intel_de_posting_read(dev_priv, DPLL(pipe));
14226 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
14228 struct intel_crtc *crtc;
14230 if (INTEL_GEN(dev_priv) >= 4)
14233 for_each_intel_crtc(&dev_priv->drm, crtc) {
14234 struct intel_plane *plane =
14235 to_intel_plane(crtc->base.primary);
14236 struct intel_crtc *plane_crtc;
14239 if (!plane->get_hw_state(plane, &pipe))
14242 if (pipe == crtc->pipe)
14245 drm_dbg_kms(&dev_priv->drm,
14246 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
14247 plane->base.base.id, plane->base.name);
14249 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14250 intel_plane_disable_noatomic(plane_crtc, plane);
14254 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
14256 struct drm_device *dev = crtc->base.dev;
14257 struct intel_encoder *encoder;
14259 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
14265 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
14267 struct drm_device *dev = encoder->base.dev;
14268 struct intel_connector *connector;
14270 for_each_connector_on_encoder(dev, &encoder->base, connector)
14276 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
14277 enum pipe pch_transcoder)
14279 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
14280 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
14283 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
14285 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
14286 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14287 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
14289 if (INTEL_GEN(dev_priv) >= 9 ||
14290 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14291 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
14294 if (transcoder_is_dsi(cpu_transcoder))
14297 val = intel_de_read(dev_priv, reg);
14298 val &= ~HSW_FRAME_START_DELAY_MASK;
14299 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
14300 intel_de_write(dev_priv, reg, val);
14302 i915_reg_t reg = PIPECONF(cpu_transcoder);
14305 val = intel_de_read(dev_priv, reg);
14306 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
14307 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
14308 intel_de_write(dev_priv, reg, val);
14311 if (!crtc_state->has_pch_encoder)
14314 if (HAS_PCH_IBX(dev_priv)) {
14315 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
14318 val = intel_de_read(dev_priv, reg);
14319 val &= ~TRANS_FRAME_START_DELAY_MASK;
14320 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
14321 intel_de_write(dev_priv, reg, val);
14323 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
14324 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
14327 val = intel_de_read(dev_priv, reg);
14328 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
14329 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
14330 intel_de_write(dev_priv, reg, val);
14334 static void intel_sanitize_crtc(struct intel_crtc *crtc,
14335 struct drm_modeset_acquire_ctx *ctx)
14337 struct drm_device *dev = crtc->base.dev;
14338 struct drm_i915_private *dev_priv = to_i915(dev);
14339 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
14341 if (crtc_state->hw.active) {
14342 struct intel_plane *plane;
14344 /* Clear any frame start delays used for debugging left by the BIOS */
14345 intel_sanitize_frame_start_delay(crtc_state);
14347 /* Disable everything but the primary plane */
14348 for_each_intel_plane_on_crtc(dev, crtc, plane) {
14349 const struct intel_plane_state *plane_state =
14350 to_intel_plane_state(plane->base.state);
14352 if (plane_state->uapi.visible &&
14353 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
14354 intel_plane_disable_noatomic(crtc, plane);
14358 * Disable any background color set by the BIOS, but enable the
14359 * gamma and CSC to match how we program our planes.
14361 if (INTEL_GEN(dev_priv) >= 9)
14362 intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
14363 SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
14366 /* Adjust the state of the output pipe according to whether we
14367 * have active connectors/encoders. */
14368 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
14369 !crtc_state->bigjoiner_slave)
14370 intel_crtc_disable_noatomic(crtc, ctx);
14372 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
14374 * We start out with underrun reporting disabled to avoid races.
14375 * For correct bookkeeping mark this on active crtcs.
14377 * Also on gmch platforms we dont have any hardware bits to
14378 * disable the underrun reporting. Which means we need to start
14379 * out with underrun reporting disabled also on inactive pipes,
14380 * since otherwise we'll complain about the garbage we read when
14381 * e.g. coming up after runtime pm.
14383 * No protection against concurrent access is required - at
14384 * worst a fifo underrun happens which also sets this to false.
14386 crtc->cpu_fifo_underrun_disabled = true;
14388 * We track the PCH trancoder underrun reporting state
14389 * within the crtc. With crtc for pipe A housing the underrun
14390 * reporting state for PCH transcoder A, crtc for pipe B housing
14391 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
14392 * and marking underrun reporting as disabled for the non-existing
14393 * PCH transcoders B and C would prevent enabling the south
14394 * error interrupt (see cpt_can_enable_serr_int()).
14396 if (has_pch_trancoder(dev_priv, crtc->pipe))
14397 crtc->pch_fifo_underrun_disabled = true;
14401 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
14403 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
14406 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
14407 * the hardware when a high res displays plugged in. DPLL P
14408 * divider is zero, and the pipe timings are bonkers. We'll
14409 * try to disable everything in that case.
14411 * FIXME would be nice to be able to sanitize this state
14412 * without several WARNs, but for now let's take the easy
14415 return IS_GEN(dev_priv, 6) &&
14416 crtc_state->hw.active &&
14417 crtc_state->shared_dpll &&
14418 crtc_state->port_clock == 0;
14421 static void intel_sanitize_encoder(struct intel_encoder *encoder)
14423 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
14424 struct intel_connector *connector;
14425 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
14426 struct intel_crtc_state *crtc_state = crtc ?
14427 to_intel_crtc_state(crtc->base.state) : NULL;
14429 /* We need to check both for a crtc link (meaning that the
14430 * encoder is active and trying to read from a pipe) and the
14431 * pipe itself being active. */
14432 bool has_active_crtc = crtc_state &&
14433 crtc_state->hw.active;
14435 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
14436 drm_dbg_kms(&dev_priv->drm,
14437 "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
14438 pipe_name(crtc->pipe));
14439 has_active_crtc = false;
14442 connector = intel_encoder_find_connector(encoder);
14443 if (connector && !has_active_crtc) {
14444 drm_dbg_kms(&dev_priv->drm,
14445 "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
14446 encoder->base.base.id,
14447 encoder->base.name);
14449 /* Connector is active, but has no active pipe. This is
14450 * fallout from our resume register restoring. Disable
14451 * the encoder manually again. */
14453 struct drm_encoder *best_encoder;
14455 drm_dbg_kms(&dev_priv->drm,
14456 "[ENCODER:%d:%s] manually disabled\n",
14457 encoder->base.base.id,
14458 encoder->base.name);
14460 /* avoid oopsing in case the hooks consult best_encoder */
14461 best_encoder = connector->base.state->best_encoder;
14462 connector->base.state->best_encoder = &encoder->base;
14464 /* FIXME NULL atomic state passed! */
14465 if (encoder->disable)
14466 encoder->disable(NULL, encoder, crtc_state,
14467 connector->base.state);
14468 if (encoder->post_disable)
14469 encoder->post_disable(NULL, encoder, crtc_state,
14470 connector->base.state);
14472 connector->base.state->best_encoder = best_encoder;
14474 encoder->base.crtc = NULL;
14476 /* Inconsistent output/port/pipe state happens presumably due to
14477 * a bug in one of the get_hw_state functions. Or someplace else
14478 * in our code, like the register restore mess on resume. Clamp
14479 * things to off as a safer default. */
14481 connector->base.dpms = DRM_MODE_DPMS_OFF;
14482 connector->base.encoder = NULL;
14485 /* notify opregion of the sanitized encoder state */
14486 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
14488 if (INTEL_GEN(dev_priv) >= 11)
14489 icl_sanitize_encoder_pll_mapping(encoder);
14492 /* FIXME read out full plane state for all planes */
14493 static void readout_plane_state(struct drm_i915_private *dev_priv)
14495 struct intel_plane *plane;
14496 struct intel_crtc *crtc;
14498 for_each_intel_plane(&dev_priv->drm, plane) {
14499 struct intel_plane_state *plane_state =
14500 to_intel_plane_state(plane->base.state);
14501 struct intel_crtc_state *crtc_state;
14502 enum pipe pipe = PIPE_A;
14505 visible = plane->get_hw_state(plane, &pipe);
14507 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14508 crtc_state = to_intel_crtc_state(crtc->base.state);
14510 intel_set_plane_visible(crtc_state, plane_state, visible);
14512 drm_dbg_kms(&dev_priv->drm,
14513 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
14514 plane->base.base.id, plane->base.name,
14515 enableddisabled(visible), pipe_name(pipe));
14518 for_each_intel_crtc(&dev_priv->drm, crtc) {
14519 struct intel_crtc_state *crtc_state =
14520 to_intel_crtc_state(crtc->base.state);
14522 fixup_plane_bitmasks(crtc_state);
14526 static void intel_modeset_readout_hw_state(struct drm_device *dev)
14528 struct drm_i915_private *dev_priv = to_i915(dev);
14529 struct intel_cdclk_state *cdclk_state =
14530 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
14531 struct intel_dbuf_state *dbuf_state =
14532 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
14534 struct intel_crtc *crtc;
14535 struct intel_encoder *encoder;
14536 struct intel_connector *connector;
14537 struct drm_connector_list_iter conn_iter;
14538 u8 active_pipes = 0;
14540 for_each_intel_crtc(dev, crtc) {
14541 struct intel_crtc_state *crtc_state =
14542 to_intel_crtc_state(crtc->base.state);
14544 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
14545 intel_crtc_free_hw_state(crtc_state);
14546 intel_crtc_state_reset(crtc_state, crtc);
14548 intel_crtc_get_pipe_config(crtc_state);
14550 crtc_state->hw.enable = crtc_state->hw.active;
14552 crtc->base.enabled = crtc_state->hw.enable;
14553 crtc->active = crtc_state->hw.active;
14555 if (crtc_state->hw.active)
14556 active_pipes |= BIT(crtc->pipe);
14558 drm_dbg_kms(&dev_priv->drm,
14559 "[CRTC:%d:%s] hw state readout: %s\n",
14560 crtc->base.base.id, crtc->base.name,
14561 enableddisabled(crtc_state->hw.active));
14564 dev_priv->active_pipes = cdclk_state->active_pipes =
14565 dbuf_state->active_pipes = active_pipes;
14567 readout_plane_state(dev_priv);
14569 intel_dpll_readout_hw_state(dev_priv);
14571 for_each_intel_encoder(dev, encoder) {
14574 if (encoder->get_hw_state(encoder, &pipe)) {
14575 struct intel_crtc_state *crtc_state;
14577 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14578 crtc_state = to_intel_crtc_state(crtc->base.state);
14580 encoder->base.crtc = &crtc->base;
14581 intel_encoder_get_config(encoder, crtc_state);
14582 if (encoder->sync_state)
14583 encoder->sync_state(encoder, crtc_state);
14585 /* read out to slave crtc as well for bigjoiner */
14586 if (crtc_state->bigjoiner) {
14587 /* encoder should read be linked to bigjoiner master */
14588 WARN_ON(crtc_state->bigjoiner_slave);
14590 crtc = crtc_state->bigjoiner_linked_crtc;
14591 crtc_state = to_intel_crtc_state(crtc->base.state);
14592 intel_encoder_get_config(encoder, crtc_state);
14595 encoder->base.crtc = NULL;
14598 drm_dbg_kms(&dev_priv->drm,
14599 "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
14600 encoder->base.base.id, encoder->base.name,
14601 enableddisabled(encoder->base.crtc),
14605 drm_connector_list_iter_begin(dev, &conn_iter);
14606 for_each_intel_connector_iter(connector, &conn_iter) {
14607 if (connector->get_hw_state(connector)) {
14608 struct intel_crtc_state *crtc_state;
14609 struct intel_crtc *crtc;
14611 connector->base.dpms = DRM_MODE_DPMS_ON;
14613 encoder = intel_attached_encoder(connector);
14614 connector->base.encoder = &encoder->base;
14616 crtc = to_intel_crtc(encoder->base.crtc);
14617 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
14619 if (crtc_state && crtc_state->hw.active) {
14621 * This has to be done during hardware readout
14622 * because anything calling .crtc_disable may
14623 * rely on the connector_mask being accurate.
14625 crtc_state->uapi.connector_mask |=
14626 drm_connector_mask(&connector->base);
14627 crtc_state->uapi.encoder_mask |=
14628 drm_encoder_mask(&encoder->base);
14631 connector->base.dpms = DRM_MODE_DPMS_OFF;
14632 connector->base.encoder = NULL;
14634 drm_dbg_kms(&dev_priv->drm,
14635 "[CONNECTOR:%d:%s] hw state readout: %s\n",
14636 connector->base.base.id, connector->base.name,
14637 enableddisabled(connector->base.encoder));
14639 drm_connector_list_iter_end(&conn_iter);
14641 for_each_intel_crtc(dev, crtc) {
14642 struct intel_bw_state *bw_state =
14643 to_intel_bw_state(dev_priv->bw_obj.state);
14644 struct intel_crtc_state *crtc_state =
14645 to_intel_crtc_state(crtc->base.state);
14646 struct intel_plane *plane;
14649 if (crtc_state->bigjoiner_slave)
14652 if (crtc_state->hw.active) {
14654 * The initial mode needs to be set in order to keep
14655 * the atomic core happy. It wants a valid mode if the
14656 * crtc's enabled, so we do the above call.
14658 * But we don't set all the derived state fully, hence
14659 * set a flag to indicate that a full recalculation is
14660 * needed on the next commit.
14662 crtc_state->inherited = true;
14664 intel_crtc_update_active_timings(crtc_state);
14666 intel_crtc_copy_hw_to_uapi_state(crtc_state);
14669 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14670 const struct intel_plane_state *plane_state =
14671 to_intel_plane_state(plane->base.state);
14674 * FIXME don't have the fb yet, so can't
14675 * use intel_plane_data_rate() :(
14677 if (plane_state->uapi.visible)
14678 crtc_state->data_rate[plane->id] =
14679 4 * crtc_state->pixel_rate;
14681 * FIXME don't have the fb yet, so can't
14682 * use plane->min_cdclk() :(
14684 if (plane_state->uapi.visible && plane->min_cdclk) {
14685 if (crtc_state->double_wide ||
14686 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
14687 crtc_state->min_cdclk[plane->id] =
14688 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
14690 crtc_state->min_cdclk[plane->id] =
14691 crtc_state->pixel_rate;
14693 drm_dbg_kms(&dev_priv->drm,
14694 "[PLANE:%d:%s] min_cdclk %d kHz\n",
14695 plane->base.base.id, plane->base.name,
14696 crtc_state->min_cdclk[plane->id]);
14699 if (crtc_state->hw.active) {
14700 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
14701 if (drm_WARN_ON(dev, min_cdclk < 0))
14705 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
14706 cdclk_state->min_voltage_level[crtc->pipe] =
14707 crtc_state->min_voltage_level;
14709 intel_bw_crtc_update(bw_state, crtc_state);
14711 intel_pipe_config_sanity_check(dev_priv, crtc_state);
14713 /* discard our incomplete slave state, copy it from master */
14714 if (crtc_state->bigjoiner && crtc_state->hw.active) {
14715 struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
14716 struct intel_crtc_state *slave_crtc_state =
14717 to_intel_crtc_state(slave->base.state);
14719 copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
14720 slave->base.mode = crtc->base.mode;
14722 cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
14723 cdclk_state->min_voltage_level[slave->pipe] =
14724 crtc_state->min_voltage_level;
14726 for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
14727 const struct intel_plane_state *plane_state =
14728 to_intel_plane_state(plane->base.state);
14731 * FIXME don't have the fb yet, so can't
14732 * use intel_plane_data_rate() :(
14734 if (plane_state->uapi.visible)
14735 crtc_state->data_rate[plane->id] =
14736 4 * crtc_state->pixel_rate;
14738 crtc_state->data_rate[plane->id] = 0;
14741 intel_bw_crtc_update(bw_state, slave_crtc_state);
14742 drm_calc_timestamping_constants(&slave->base,
14743 &slave_crtc_state->hw.adjusted_mode);
14749 get_encoder_power_domains(struct drm_i915_private *dev_priv)
14751 struct intel_encoder *encoder;
14753 for_each_intel_encoder(&dev_priv->drm, encoder) {
14754 struct intel_crtc_state *crtc_state;
14756 if (!encoder->get_power_domains)
14760 * MST-primary and inactive encoders don't have a crtc state
14761 * and neither of these require any power domain references.
14763 if (!encoder->base.crtc)
14766 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
14767 encoder->get_power_domains(encoder, crtc_state);
14771 static void intel_early_display_was(struct drm_i915_private *dev_priv)
14774 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
14775 * Also known as Wa_14010480278.
14777 if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
14778 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
14779 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
14781 if (IS_HASWELL(dev_priv)) {
14783 * WaRsPkgCStateDisplayPMReq:hsw
14784 * System hang if this isn't done before disabling all planes!
14786 intel_de_write(dev_priv, CHICKEN_PAR1_1,
14787 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
14790 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
14791 /* Display WA #1142:kbl,cfl,cml */
14792 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
14793 KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
14794 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
14795 KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
14796 KBL_ARB_FILL_SPARE_14);
14800 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
14801 enum port port, i915_reg_t hdmi_reg)
14803 u32 val = intel_de_read(dev_priv, hdmi_reg);
14805 if (val & SDVO_ENABLE ||
14806 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
14809 drm_dbg_kms(&dev_priv->drm,
14810 "Sanitizing transcoder select for HDMI %c\n",
14813 val &= ~SDVO_PIPE_SEL_MASK;
14814 val |= SDVO_PIPE_SEL(PIPE_A);
14816 intel_de_write(dev_priv, hdmi_reg, val);
14819 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
14820 enum port port, i915_reg_t dp_reg)
14822 u32 val = intel_de_read(dev_priv, dp_reg);
14824 if (val & DP_PORT_EN ||
14825 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
14828 drm_dbg_kms(&dev_priv->drm,
14829 "Sanitizing transcoder select for DP %c\n",
14832 val &= ~DP_PIPE_SEL_MASK;
14833 val |= DP_PIPE_SEL(PIPE_A);
14835 intel_de_write(dev_priv, dp_reg, val);
14838 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
14841 * The BIOS may select transcoder B on some of the PCH
14842 * ports even it doesn't enable the port. This would trip
14843 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
14844 * Sanitize the transcoder select bits to prevent that. We
14845 * assume that the BIOS never actually enabled the port,
14846 * because if it did we'd actually have to toggle the port
14847 * on and back off to make the transcoder A select stick
14848 * (see. intel_dp_link_down(), intel_disable_hdmi(),
14849 * intel_disable_sdvo()).
14851 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
14852 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
14853 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
14855 /* PCH SDVOB multiplex with HDMIB */
14856 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
14857 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
14858 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
14861 /* Scan out the current hw modeset state,
14862 * and sanitizes it to the current state
14865 intel_modeset_setup_hw_state(struct drm_device *dev,
14866 struct drm_modeset_acquire_ctx *ctx)
14868 struct drm_i915_private *dev_priv = to_i915(dev);
14869 struct intel_encoder *encoder;
14870 struct intel_crtc *crtc;
14871 intel_wakeref_t wakeref;
14873 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
14875 intel_early_display_was(dev_priv);
14876 intel_modeset_readout_hw_state(dev);
14878 /* HW state is read out, now we need to sanitize this mess. */
14880 /* Sanitize the TypeC port mode upfront, encoders depend on this */
14881 for_each_intel_encoder(dev, encoder) {
14882 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
14884 /* We need to sanitize only the MST primary port. */
14885 if (encoder->type != INTEL_OUTPUT_DP_MST &&
14886 intel_phy_is_tc(dev_priv, phy))
14887 intel_tc_port_sanitize(enc_to_dig_port(encoder));
14890 get_encoder_power_domains(dev_priv);
14892 if (HAS_PCH_IBX(dev_priv))
14893 ibx_sanitize_pch_ports(dev_priv);
14896 * intel_sanitize_plane_mapping() may need to do vblank
14897 * waits, so we need vblank interrupts restored beforehand.
14899 for_each_intel_crtc(&dev_priv->drm, crtc) {
14900 struct intel_crtc_state *crtc_state =
14901 to_intel_crtc_state(crtc->base.state);
14903 drm_crtc_vblank_reset(&crtc->base);
14905 if (crtc_state->hw.active)
14906 intel_crtc_vblank_on(crtc_state);
14909 intel_sanitize_plane_mapping(dev_priv);
14911 for_each_intel_encoder(dev, encoder)
14912 intel_sanitize_encoder(encoder);
14914 for_each_intel_crtc(&dev_priv->drm, crtc) {
14915 struct intel_crtc_state *crtc_state =
14916 to_intel_crtc_state(crtc->base.state);
14918 intel_sanitize_crtc(crtc, ctx);
14919 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
14922 intel_modeset_update_connector_atomic_state(dev);
14924 intel_dpll_sanitize_state(dev_priv);
14926 if (IS_G4X(dev_priv)) {
14927 g4x_wm_get_hw_state(dev_priv);
14928 g4x_wm_sanitize(dev_priv);
14929 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14930 vlv_wm_get_hw_state(dev_priv);
14931 vlv_wm_sanitize(dev_priv);
14932 } else if (INTEL_GEN(dev_priv) >= 9) {
14933 skl_wm_get_hw_state(dev_priv);
14934 } else if (HAS_PCH_SPLIT(dev_priv)) {
14935 ilk_wm_get_hw_state(dev_priv);
14938 for_each_intel_crtc(dev, crtc) {
14939 struct intel_crtc_state *crtc_state =
14940 to_intel_crtc_state(crtc->base.state);
14943 put_domains = modeset_get_crtc_power_domains(crtc_state);
14944 if (drm_WARN_ON(dev, put_domains))
14945 modeset_put_crtc_power_domains(crtc, put_domains);
14948 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
14951 void intel_display_resume(struct drm_device *dev)
14953 struct drm_i915_private *dev_priv = to_i915(dev);
14954 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
14955 struct drm_modeset_acquire_ctx ctx;
14958 dev_priv->modeset_restore_state = NULL;
14960 state->acquire_ctx = &ctx;
14962 drm_modeset_acquire_init(&ctx, 0);
14965 ret = drm_modeset_lock_all_ctx(dev, &ctx);
14966 if (ret != -EDEADLK)
14969 drm_modeset_backoff(&ctx);
14973 ret = __intel_display_resume(dev, state, &ctx);
14975 intel_enable_ipc(dev_priv);
14976 drm_modeset_drop_locks(&ctx);
14977 drm_modeset_acquire_fini(&ctx);
14980 drm_err(&dev_priv->drm,
14981 "Restoring old state failed with %i\n", ret);
14983 drm_atomic_state_put(state);
14986 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
14988 struct intel_connector *connector;
14989 struct drm_connector_list_iter conn_iter;
14991 /* Kill all the work that may have been queued by hpd. */
14992 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
14993 for_each_intel_connector_iter(connector, &conn_iter) {
14994 if (connector->modeset_retry_work.func)
14995 cancel_work_sync(&connector->modeset_retry_work);
14996 if (connector->hdcp.shim) {
14997 cancel_delayed_work_sync(&connector->hdcp.check_work);
14998 cancel_work_sync(&connector->hdcp.prop_work);
15001 drm_connector_list_iter_end(&conn_iter);
15004 /* part #1: call before irq uninstall */
15005 void intel_modeset_driver_remove(struct drm_i915_private *i915)
15007 flush_workqueue(i915->flip_wq);
15008 flush_workqueue(i915->modeset_wq);
15010 flush_work(&i915->atomic_helper.free_work);
15011 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
15014 /* part #2: call after irq uninstall */
15015 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
15018 * Due to the hpd irq storm handling the hotplug work can re-arm the
15019 * poll handlers. Hence disable polling after hpd handling is shut down.
15021 intel_hpd_poll_fini(i915);
15024 * MST topology needs to be suspended so we don't have any calls to
15025 * fbdev after it's finalized. MST will be destroyed later as part of
15026 * drm_mode_config_cleanup()
15028 intel_dp_mst_suspend(i915);
15030 /* poll work can call into fbdev, hence clean that up afterwards */
15031 intel_fbdev_fini(i915);
15033 intel_unregister_dsm_handler();
15035 intel_fbc_global_disable(i915);
15037 /* flush any delayed tasks or pending work */
15038 flush_scheduled_work();
15040 intel_hdcp_component_fini(i915);
15042 intel_mode_config_cleanup(i915);
15044 intel_overlay_cleanup(i915);
15046 intel_gmbus_teardown(i915);
15048 destroy_workqueue(i915->flip_wq);
15049 destroy_workqueue(i915->modeset_wq);
15051 intel_fbc_cleanup_cfb(i915);
15054 /* part #3: call after gem init */
15055 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
15057 intel_csr_ucode_fini(i915);
15059 intel_power_domains_driver_remove(i915);
15061 intel_vga_unregister(i915);
15063 intel_bios_driver_remove(i915);
15066 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
15068 struct intel_display_error_state {
15070 u32 power_well_driver;
15072 struct intel_cursor_error_state {
15077 } cursor[I915_MAX_PIPES];
15079 struct intel_pipe_error_state {
15080 bool power_domain_on;
15083 } pipe[I915_MAX_PIPES];
15085 struct intel_plane_error_state {
15093 } plane[I915_MAX_PIPES];
15095 struct intel_transcoder_error_state {
15097 bool power_domain_on;
15098 enum transcoder cpu_transcoder;
15111 struct intel_display_error_state *
15112 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
15114 struct intel_display_error_state *error;
15115 int transcoders[] = {
15124 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
15126 if (!HAS_DISPLAY(dev_priv))
15129 error = kzalloc(sizeof(*error), GFP_ATOMIC);
15133 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
15134 error->power_well_driver = intel_de_read(dev_priv,
15135 HSW_PWR_WELL_CTL2);
15137 for_each_pipe(dev_priv, i) {
15138 error->pipe[i].power_domain_on =
15139 __intel_display_power_is_enabled(dev_priv,
15140 POWER_DOMAIN_PIPE(i));
15141 if (!error->pipe[i].power_domain_on)
15144 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
15145 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
15146 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
15148 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
15149 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
15150 if (INTEL_GEN(dev_priv) <= 3) {
15151 error->plane[i].size = intel_de_read(dev_priv,
15153 error->plane[i].pos = intel_de_read(dev_priv,
15156 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
15157 error->plane[i].addr = intel_de_read(dev_priv,
15159 if (INTEL_GEN(dev_priv) >= 4) {
15160 error->plane[i].surface = intel_de_read(dev_priv,
15162 error->plane[i].tile_offset = intel_de_read(dev_priv,
15166 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
15168 if (HAS_GMCH(dev_priv))
15169 error->pipe[i].stat = intel_de_read(dev_priv,
15173 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
15174 enum transcoder cpu_transcoder = transcoders[i];
15176 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
15179 error->transcoder[i].available = true;
15180 error->transcoder[i].power_domain_on =
15181 __intel_display_power_is_enabled(dev_priv,
15182 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
15183 if (!error->transcoder[i].power_domain_on)
15186 error->transcoder[i].cpu_transcoder = cpu_transcoder;
15188 error->transcoder[i].conf = intel_de_read(dev_priv,
15189 PIPECONF(cpu_transcoder));
15190 error->transcoder[i].htotal = intel_de_read(dev_priv,
15191 HTOTAL(cpu_transcoder));
15192 error->transcoder[i].hblank = intel_de_read(dev_priv,
15193 HBLANK(cpu_transcoder));
15194 error->transcoder[i].hsync = intel_de_read(dev_priv,
15195 HSYNC(cpu_transcoder));
15196 error->transcoder[i].vtotal = intel_de_read(dev_priv,
15197 VTOTAL(cpu_transcoder));
15198 error->transcoder[i].vblank = intel_de_read(dev_priv,
15199 VBLANK(cpu_transcoder));
15200 error->transcoder[i].vsync = intel_de_read(dev_priv,
15201 VSYNC(cpu_transcoder));
15207 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15210 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
15211 struct intel_display_error_state *error)
15213 struct drm_i915_private *dev_priv = m->i915;
15219 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
15220 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
15221 err_printf(m, "PWR_WELL_CTL2: %08x\n",
15222 error->power_well_driver);
15223 for_each_pipe(dev_priv, i) {
15224 err_printf(m, "Pipe [%d]:\n", i);
15225 err_printf(m, " Power: %s\n",
15226 onoff(error->pipe[i].power_domain_on));
15227 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
15228 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
15230 err_printf(m, "Plane [%d]:\n", i);
15231 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
15232 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
15233 if (INTEL_GEN(dev_priv) <= 3) {
15234 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
15235 err_printf(m, " POS: %08x\n", error->plane[i].pos);
15237 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
15238 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
15239 if (INTEL_GEN(dev_priv) >= 4) {
15240 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
15241 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
15244 err_printf(m, "Cursor [%d]:\n", i);
15245 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
15246 err_printf(m, " POS: %08x\n", error->cursor[i].position);
15247 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
15250 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
15251 if (!error->transcoder[i].available)
15254 err_printf(m, "CPU transcoder: %s\n",
15255 transcoder_name(error->transcoder[i].cpu_transcoder));
15256 err_printf(m, " Power: %s\n",
15257 onoff(error->transcoder[i].power_domain_on));
15258 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
15259 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
15260 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
15261 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
15262 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
15263 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
15264 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);