2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 #include <linux/string_helpers.h>
36 #include <linux/vga_switcheroo.h>
38 #include <drm/display/drm_dp_helper.h>
39 #include <drm/drm_atomic.h>
40 #include <drm/drm_atomic_helper.h>
41 #include <drm/drm_atomic_uapi.h>
42 #include <drm/drm_damage_helper.h>
43 #include <drm/drm_edid.h>
44 #include <drm/drm_fourcc.h>
45 #include <drm/drm_plane_helper.h>
46 #include <drm/drm_privacy_screen_consumer.h>
47 #include <drm/drm_probe_helper.h>
48 #include <drm/drm_rect.h>
50 #include "display/intel_audio.h"
51 #include "display/intel_crt.h"
52 #include "display/intel_ddi.h"
53 #include "display/intel_display_debugfs.h"
54 #include "display/intel_dp.h"
55 #include "display/intel_dp_mst.h"
56 #include "display/intel_dpll.h"
57 #include "display/intel_dpll_mgr.h"
58 #include "display/intel_drrs.h"
59 #include "display/intel_dsi.h"
60 #include "display/intel_dvo.h"
61 #include "display/intel_fb.h"
62 #include "display/intel_gmbus.h"
63 #include "display/intel_hdmi.h"
64 #include "display/intel_lvds.h"
65 #include "display/intel_sdvo.h"
66 #include "display/intel_snps_phy.h"
67 #include "display/intel_tv.h"
68 #include "display/intel_vdsc.h"
69 #include "display/intel_vrr.h"
71 #include "gem/i915_gem_lmem.h"
72 #include "gem/i915_gem_object.h"
74 #include "gt/gen8_ppgtt.h"
80 #include "i915_utils.h"
82 #include "intel_acpi.h"
83 #include "intel_atomic.h"
84 #include "intel_atomic_plane.h"
86 #include "intel_cdclk.h"
87 #include "intel_color.h"
88 #include "intel_crtc.h"
90 #include "intel_display_types.h"
91 #include "intel_dmc.h"
92 #include "intel_dp_link_training.h"
93 #include "intel_dpt.h"
94 #include "intel_fbc.h"
95 #include "intel_fbdev.h"
96 #include "intel_fdi.h"
97 #include "intel_fifo_underrun.h"
98 #include "intel_frontbuffer.h"
99 #include "intel_hdcp.h"
100 #include "intel_hotplug.h"
101 #include "intel_overlay.h"
102 #include "intel_panel.h"
103 #include "intel_pch_display.h"
104 #include "intel_pch_refclk.h"
105 #include "intel_pcode.h"
106 #include "intel_pipe_crc.h"
107 #include "intel_plane_initial.h"
108 #include "intel_pm.h"
109 #include "intel_pps.h"
110 #include "intel_psr.h"
111 #include "intel_quirks.h"
112 #include "intel_sprite.h"
113 #include "intel_tc.h"
114 #include "intel_vga.h"
115 #include "i9xx_plane.h"
116 #include "skl_scaler.h"
117 #include "skl_universal_plane.h"
119 #include "vlv_dsi_pll.h"
120 #include "vlv_dsi_regs.h"
121 #include "vlv_sideband.h"
123 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
124 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
125 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
126 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
127 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
128 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
129 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
130 static void intel_modeset_setup_hw_state(struct drm_device *dev,
131 struct drm_modeset_acquire_ctx *ctx);
134 * intel_update_watermarks - update FIFO watermark values based on current modes
135 * @dev_priv: i915 device
137 * Calculate watermark values for the various WM regs based on current mode
138 * and plane configuration.
140 * There are several cases to deal with here:
141 * - normal (i.e. non-self-refresh)
142 * - self-refresh (SR) mode
143 * - lines are large relative to FIFO size (buffer can hold up to 2)
144 * - lines are small relative to FIFO size (buffer can hold more than 2
145 * lines), so need to account for TLB latency
147 * The normal calculation is:
148 * watermark = dotclock * bytes per pixel * latency
149 * where latency is platform & configuration dependent (we assume pessimal
152 * The SR calculation is:
153 * watermark = (trunc(latency/line time)+1) * surface width *
156 * line time = htotal / dotclock
157 * surface width = hdisplay for normal plane and 64 for cursor
158 * and latency is assumed to be high, as above.
160 * The final value programmed to the register should always be rounded up,
161 * and include an extra 2 entries to account for clock crossings.
163 * We don't use the sprite, so we can ignore that. And on Crestline we have
164 * to set the non-SR watermarks to 8.
166 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
168 if (dev_priv->wm_disp->update_wm)
169 dev_priv->wm_disp->update_wm(dev_priv);
172 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
173 struct intel_crtc *crtc)
175 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
176 if (dev_priv->wm_disp->compute_pipe_wm)
177 return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
181 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
182 struct intel_crtc *crtc)
184 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
185 if (!dev_priv->wm_disp->compute_intermediate_wm)
187 if (drm_WARN_ON(&dev_priv->drm,
188 !dev_priv->wm_disp->compute_pipe_wm))
190 return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
193 static bool intel_initial_watermarks(struct intel_atomic_state *state,
194 struct intel_crtc *crtc)
196 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
197 if (dev_priv->wm_disp->initial_watermarks) {
198 dev_priv->wm_disp->initial_watermarks(state, crtc);
204 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
205 struct intel_crtc *crtc)
207 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
208 if (dev_priv->wm_disp->atomic_update_watermarks)
209 dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
212 static void intel_optimize_watermarks(struct intel_atomic_state *state,
213 struct intel_crtc *crtc)
215 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
216 if (dev_priv->wm_disp->optimize_watermarks)
217 dev_priv->wm_disp->optimize_watermarks(state, crtc);
220 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
222 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
223 if (dev_priv->wm_disp->compute_global_watermarks)
224 return dev_priv->wm_disp->compute_global_watermarks(state);
228 /* returns HPLL frequency in kHz */
229 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
231 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
233 /* Obtain SKU information */
234 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
235 CCK_FUSE_HPLL_FREQ_MASK;
237 return vco_freq[hpll_freq] * 1000;
240 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
241 const char *name, u32 reg, int ref_freq)
246 val = vlv_cck_read(dev_priv, reg);
247 divider = val & CCK_FREQUENCY_VALUES;
249 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
250 (divider << CCK_FREQUENCY_STATUS_SHIFT),
251 "%s change in progress\n", name);
253 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
256 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
257 const char *name, u32 reg)
261 vlv_cck_get(dev_priv);
263 if (dev_priv->hpll_freq == 0)
264 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
266 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
268 vlv_cck_put(dev_priv);
273 static void intel_update_czclk(struct drm_i915_private *dev_priv)
275 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
278 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
279 CCK_CZ_CLOCK_CONTROL);
281 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
282 dev_priv->czclk_freq);
285 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
287 return (crtc_state->active_planes &
288 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
291 /* WA Display #0827: Gen9:all */
293 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
296 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
297 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
299 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
300 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
303 /* Wa_2006604312:icl,ehl */
305 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
309 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
310 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
312 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
313 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
316 /* Wa_1604331009:icl,jsl,ehl */
318 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
321 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
322 enable ? CURSOR_GATING_DIS : 0);
326 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
328 return crtc_state->master_transcoder != INVALID_TRANSCODER;
332 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
334 return crtc_state->sync_mode_slaves_mask != 0;
338 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
340 return is_trans_port_sync_master(crtc_state) ||
341 is_trans_port_sync_slave(crtc_state);
344 static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
346 return ffs(crtc_state->bigjoiner_pipes) - 1;
349 u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
351 if (crtc_state->bigjoiner_pipes)
352 return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
357 bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
359 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
361 return crtc_state->bigjoiner_pipes &&
362 crtc->pipe != bigjoiner_master_pipe(crtc_state);
365 bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
367 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
369 return crtc_state->bigjoiner_pipes &&
370 crtc->pipe == bigjoiner_master_pipe(crtc_state);
373 static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
375 return hweight8(crtc_state->bigjoiner_pipes);
378 struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
380 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
382 if (intel_crtc_is_bigjoiner_slave(crtc_state))
383 return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
385 return to_intel_crtc(crtc_state->uapi.crtc);
388 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
391 i915_reg_t reg = PIPEDSL(pipe);
394 line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
396 line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
398 return line1 != line2;
401 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
403 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
404 enum pipe pipe = crtc->pipe;
406 /* Wait for the display line to settle/start moving */
407 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
408 drm_err(&dev_priv->drm,
409 "pipe %c scanline %s wait timed out\n",
410 pipe_name(pipe), str_on_off(state));
413 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
415 wait_for_pipe_scanline_moving(crtc, false);
418 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
420 wait_for_pipe_scanline_moving(crtc, true);
424 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
426 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
427 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
429 if (DISPLAY_VER(dev_priv) >= 4) {
430 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
432 /* Wait for the Pipe State to go off */
433 if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
434 PIPECONF_STATE_ENABLE, 100))
435 drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
437 intel_wait_for_pipe_scanline_stopped(crtc);
441 void assert_transcoder(struct drm_i915_private *dev_priv,
442 enum transcoder cpu_transcoder, bool state)
445 enum intel_display_power_domain power_domain;
446 intel_wakeref_t wakeref;
448 /* we keep both pipes enabled on 830 */
449 if (IS_I830(dev_priv))
452 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
453 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
455 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
456 cur_state = !!(val & PIPECONF_ENABLE);
458 intel_display_power_put(dev_priv, power_domain, wakeref);
463 I915_STATE_WARN(cur_state != state,
464 "transcoder %s assertion failure (expected %s, current %s)\n",
465 transcoder_name(cpu_transcoder),
466 str_on_off(state), str_on_off(cur_state));
469 static void assert_plane(struct intel_plane *plane, bool state)
474 cur_state = plane->get_hw_state(plane, &pipe);
476 I915_STATE_WARN(cur_state != state,
477 "%s assertion failure (expected %s, current %s)\n",
478 plane->base.name, str_on_off(state),
479 str_on_off(cur_state));
482 #define assert_plane_enabled(p) assert_plane(p, true)
483 #define assert_plane_disabled(p) assert_plane(p, false)
485 static void assert_planes_disabled(struct intel_crtc *crtc)
487 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
488 struct intel_plane *plane;
490 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
491 assert_plane_disabled(plane);
494 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
495 struct intel_digital_port *dig_port,
496 unsigned int expected_mask)
501 switch (dig_port->base.port) {
503 port_mask = DPLL_PORTB_READY_MASK;
507 port_mask = DPLL_PORTC_READY_MASK;
512 port_mask = DPLL_PORTD_READY_MASK;
513 dpll_reg = DPIO_PHY_STATUS;
519 if (intel_de_wait_for_register(dev_priv, dpll_reg,
520 port_mask, expected_mask, 1000))
521 drm_WARN(&dev_priv->drm, 1,
522 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
523 dig_port->base.base.base.id, dig_port->base.base.name,
524 intel_de_read(dev_priv, dpll_reg) & port_mask,
528 void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
530 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
531 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
532 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
533 enum pipe pipe = crtc->pipe;
537 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
539 assert_planes_disabled(crtc);
542 * A pipe without a PLL won't actually be able to drive bits from
543 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
546 if (HAS_GMCH(dev_priv)) {
547 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
548 assert_dsi_pll_enabled(dev_priv);
550 assert_pll_enabled(dev_priv, pipe);
552 if (new_crtc_state->has_pch_encoder) {
553 /* if driving the PCH, we need FDI enabled */
554 assert_fdi_rx_pll_enabled(dev_priv,
555 intel_crtc_pch_transcoder(crtc));
556 assert_fdi_tx_pll_enabled(dev_priv,
557 (enum pipe) cpu_transcoder);
559 /* FIXME: assert CPU port conditions for SNB+ */
562 /* Wa_22012358565:adl-p */
563 if (DISPLAY_VER(dev_priv) == 13)
564 intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
565 0, PIPE_ARB_USE_PROG_SLOTS);
567 reg = PIPECONF(cpu_transcoder);
568 val = intel_de_read(dev_priv, reg);
569 if (val & PIPECONF_ENABLE) {
570 /* we keep both pipes enabled on 830 */
571 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
575 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
576 intel_de_posting_read(dev_priv, reg);
579 * Until the pipe starts PIPEDSL reads will return a stale value,
580 * which causes an apparent vblank timestamp jump when PIPEDSL
581 * resets to its proper value. That also messes up the frame count
582 * when it's derived from the timestamps. So let's wait for the
583 * pipe to start properly before we call drm_crtc_vblank_on()
585 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
586 intel_wait_for_pipe_scanline_moving(crtc);
589 void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
591 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
592 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
593 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
594 enum pipe pipe = crtc->pipe;
598 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
601 * Make sure planes won't keep trying to pump pixels to us,
602 * or we might hang the display.
604 assert_planes_disabled(crtc);
606 reg = PIPECONF(cpu_transcoder);
607 val = intel_de_read(dev_priv, reg);
608 if ((val & PIPECONF_ENABLE) == 0)
612 * Double wide has implications for planes
613 * so best keep it disabled when not needed.
615 if (old_crtc_state->double_wide)
616 val &= ~PIPECONF_DOUBLE_WIDE;
618 /* Don't disable pipe or pipe PLLs if needed */
619 if (!IS_I830(dev_priv))
620 val &= ~PIPECONF_ENABLE;
622 if (DISPLAY_VER(dev_priv) >= 12)
623 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
624 FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
626 intel_de_write(dev_priv, reg, val);
627 if ((val & PIPECONF_ENABLE) == 0)
628 intel_wait_for_pipe_off(old_crtc_state);
631 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
633 unsigned int size = 0;
636 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
637 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
642 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
644 unsigned int size = 0;
647 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
648 unsigned int plane_size;
650 if (rem_info->plane[i].linear)
651 plane_size = rem_info->plane[i].size;
653 plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
658 if (rem_info->plane_alignment)
659 size = ALIGN(size, rem_info->plane_alignment);
667 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
669 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
670 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
672 return DISPLAY_VER(dev_priv) < 4 ||
674 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
678 * Convert the x/y offsets into a linear offset.
679 * Only valid with 0/180 degree rotation, which is fine since linear
680 * offset is only used with linear buffers on pre-hsw and tiled buffers
681 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
683 u32 intel_fb_xy_to_linear(int x, int y,
684 const struct intel_plane_state *state,
687 const struct drm_framebuffer *fb = state->hw.fb;
688 unsigned int cpp = fb->format->cpp[color_plane];
689 unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
691 return y * pitch + x * cpp;
695 * Add the x/y offsets derived from fb->offsets[] to the user
696 * specified plane src x/y offsets. The resulting x/y offsets
697 * specify the start of scanout from the beginning of the gtt mapping.
699 void intel_add_fb_offsets(int *x, int *y,
700 const struct intel_plane_state *state,
704 *x += state->view.color_plane[color_plane].x;
705 *y += state->view.color_plane[color_plane].y;
708 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
709 u32 pixel_format, u64 modifier)
711 struct intel_crtc *crtc;
712 struct intel_plane *plane;
714 if (!HAS_DISPLAY(dev_priv))
718 * We assume the primary plane for pipe A has
719 * the highest stride limits of them all,
720 * if in case pipe A is disabled, use the first pipe from pipe_mask.
722 crtc = intel_first_crtc(dev_priv);
726 plane = to_intel_plane(crtc->base.primary);
728 return plane->max_stride(plane, pixel_format, modifier,
733 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
734 struct intel_plane_state *plane_state,
737 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
739 plane_state->uapi.visible = visible;
742 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
744 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
747 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
749 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
750 struct drm_plane *plane;
753 * Active_planes aliases if multiple "primary" or cursor planes
754 * have been used on the same (or wrong) pipe. plane_mask uses
755 * unique ids, hence we can use that to reconstruct active_planes.
757 crtc_state->enabled_planes = 0;
758 crtc_state->active_planes = 0;
760 drm_for_each_plane_mask(plane, &dev_priv->drm,
761 crtc_state->uapi.plane_mask) {
762 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
763 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
767 void intel_plane_disable_noatomic(struct intel_crtc *crtc,
768 struct intel_plane *plane)
770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
771 struct intel_crtc_state *crtc_state =
772 to_intel_crtc_state(crtc->base.state);
773 struct intel_plane_state *plane_state =
774 to_intel_plane_state(plane->base.state);
776 drm_dbg_kms(&dev_priv->drm,
777 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
778 plane->base.base.id, plane->base.name,
779 crtc->base.base.id, crtc->base.name);
781 intel_set_plane_visible(crtc_state, plane_state, false);
782 fixup_plane_bitmasks(crtc_state);
783 crtc_state->data_rate[plane->id] = 0;
784 crtc_state->data_rate_y[plane->id] = 0;
785 crtc_state->rel_data_rate[plane->id] = 0;
786 crtc_state->rel_data_rate_y[plane->id] = 0;
787 crtc_state->min_cdclk[plane->id] = 0;
789 if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
790 hsw_ips_disable(crtc_state)) {
791 crtc_state->ips_enabled = false;
792 intel_crtc_wait_for_next_vblank(crtc);
796 * Vblank time updates from the shadow to live plane control register
797 * are blocked if the memory self-refresh mode is active at that
798 * moment. So to make sure the plane gets truly disabled, disable
799 * first the self-refresh mode. The self-refresh enable bit in turn
800 * will be checked/applied by the HW only at the next frame start
801 * event which is after the vblank start event, so we need to have a
802 * wait-for-vblank between disabling the plane and the pipe.
804 if (HAS_GMCH(dev_priv) &&
805 intel_set_memory_cxsr(dev_priv, false))
806 intel_crtc_wait_for_next_vblank(crtc);
809 * Gen2 reports pipe underruns whenever all planes are disabled.
810 * So disable underrun reporting before all the planes get disabled.
812 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
813 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
815 intel_plane_disable_arm(plane, crtc_state);
816 intel_crtc_wait_for_next_vblank(crtc);
820 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
824 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
825 plane_state->view.color_plane[0].offset, 0);
831 __intel_display_resume(struct drm_device *dev,
832 struct drm_atomic_state *state,
833 struct drm_modeset_acquire_ctx *ctx)
835 struct drm_crtc_state *crtc_state;
836 struct drm_crtc *crtc;
839 intel_modeset_setup_hw_state(dev, ctx);
840 intel_vga_redisable(to_i915(dev));
846 * We've duplicated the state, pointers to the old state are invalid.
848 * Don't attempt to use the old state until we commit the duplicated state.
850 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
852 * Force recalculation even if we restore
853 * current state. With fast modeset this may not result
854 * in a modeset when the state is compatible.
856 crtc_state->mode_changed = true;
859 /* ignore any reset values/BIOS leftovers in the WM registers */
860 if (!HAS_GMCH(to_i915(dev)))
861 to_intel_atomic_state(state)->skip_intermediate_wm = true;
863 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
865 drm_WARN_ON(dev, ret == -EDEADLK);
869 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
871 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
872 intel_has_gpu_reset(to_gt(dev_priv)));
875 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
877 struct drm_device *dev = &dev_priv->drm;
878 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
879 struct drm_atomic_state *state;
882 if (!HAS_DISPLAY(dev_priv))
885 /* reset doesn't touch the display */
886 if (!dev_priv->params.force_reset_modeset_test &&
887 !gpu_reset_clobbers_display(dev_priv))
890 /* We have a modeset vs reset deadlock, defensively unbreak it. */
891 set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
892 smp_mb__after_atomic();
893 wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
895 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
896 drm_dbg_kms(&dev_priv->drm,
897 "Modeset potentially stuck, unbreaking through wedging\n");
898 intel_gt_set_wedged(to_gt(dev_priv));
902 * Need mode_config.mutex so that we don't
903 * trample ongoing ->detect() and whatnot.
905 mutex_lock(&dev->mode_config.mutex);
906 drm_modeset_acquire_init(ctx, 0);
908 ret = drm_modeset_lock_all_ctx(dev, ctx);
912 drm_modeset_backoff(ctx);
915 * Disabling the crtcs gracefully seems nicer. Also the
916 * g33 docs say we should at least disable all the planes.
918 state = drm_atomic_helper_duplicate_state(dev, ctx);
920 ret = PTR_ERR(state);
921 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
926 ret = drm_atomic_helper_disable_all(dev, ctx);
928 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
930 drm_atomic_state_put(state);
934 dev_priv->modeset_restore_state = state;
935 state->acquire_ctx = ctx;
938 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
940 struct drm_device *dev = &dev_priv->drm;
941 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
942 struct drm_atomic_state *state;
945 if (!HAS_DISPLAY(dev_priv))
948 /* reset doesn't touch the display */
949 if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
952 state = fetch_and_zero(&dev_priv->modeset_restore_state);
956 /* reset doesn't touch the display */
957 if (!gpu_reset_clobbers_display(dev_priv)) {
958 /* for testing only restore the display */
959 ret = __intel_display_resume(dev, state, ctx);
961 drm_err(&dev_priv->drm,
962 "Restoring old state failed with %i\n", ret);
965 * The display has been reset as well,
966 * so need a full re-initialization.
968 intel_pps_unlock_regs_wa(dev_priv);
969 intel_modeset_init_hw(dev_priv);
970 intel_init_clock_gating(dev_priv);
971 intel_hpd_init(dev_priv);
973 ret = __intel_display_resume(dev, state, ctx);
975 drm_err(&dev_priv->drm,
976 "Restoring old state failed with %i\n", ret);
978 intel_hpd_poll_disable(dev_priv);
981 drm_atomic_state_put(state);
983 drm_modeset_drop_locks(ctx);
984 drm_modeset_acquire_fini(ctx);
985 mutex_unlock(&dev->mode_config.mutex);
987 clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
990 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
992 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
993 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
994 enum pipe pipe = crtc->pipe;
997 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
1000 * Display WA #1153: icl
1001 * enable hardware to bypass the alpha math
1002 * and rounding for per-pixel values 00 and 0xff
1004 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
1006 * Display WA # 1605353570: icl
1007 * Set the pixel rounding bit to 1 for allowing
1008 * passthrough of Frame buffer pixels unmodified
1011 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
1014 * Underrun recovery must always be disabled on display 13+.
1015 * DG2 chicken bit meaning is inverted compared to other platforms.
1017 if (IS_DG2(dev_priv))
1018 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
1019 else if (DISPLAY_VER(dev_priv) >= 13)
1020 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
1022 /* Wa_14010547955:dg2 */
1023 if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
1024 tmp |= DG2_RENDER_CCSTAG_4_3_EN;
1026 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1029 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1031 struct drm_crtc *crtc;
1034 drm_for_each_crtc(crtc, &dev_priv->drm) {
1035 struct drm_crtc_commit *commit;
1036 spin_lock(&crtc->commit_lock);
1037 commit = list_first_entry_or_null(&crtc->commit_list,
1038 struct drm_crtc_commit, commit_entry);
1039 cleanup_done = commit ?
1040 try_wait_for_completion(&commit->cleanup_done) : true;
1041 spin_unlock(&crtc->commit_lock);
1046 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
1055 * Finds the encoder associated with the given CRTC. This can only be
1056 * used when we know that the CRTC isn't feeding multiple encoders!
1058 struct intel_encoder *
1059 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1060 const struct intel_crtc_state *crtc_state)
1062 const struct drm_connector_state *connector_state;
1063 const struct drm_connector *connector;
1064 struct intel_encoder *encoder = NULL;
1065 struct intel_crtc *master_crtc;
1066 int num_encoders = 0;
1069 master_crtc = intel_master_crtc(crtc_state);
1071 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1072 if (connector_state->crtc != &master_crtc->base)
1075 encoder = to_intel_encoder(connector_state->best_encoder);
1079 drm_WARN(encoder->base.dev, num_encoders != 1,
1080 "%d encoders for pipe %c\n",
1081 num_encoders, pipe_name(master_crtc->pipe));
1086 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
1089 i915_reg_t dslreg = PIPEDSL(pipe);
1092 temp = intel_de_read(dev_priv, dslreg);
1094 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
1095 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
1096 drm_err(&dev_priv->drm,
1097 "mode set failed: pipe %c stuck\n",
1102 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
1104 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1105 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1106 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
1107 enum pipe pipe = crtc->pipe;
1108 int width = drm_rect_width(dst);
1109 int height = drm_rect_height(dst);
1113 if (!crtc_state->pch_pfit.enabled)
1116 /* Force use of hard-coded filter coefficients
1117 * as some pre-programmed values are broken,
1120 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
1121 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
1122 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
1124 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
1126 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
1127 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
/*
 * Turn off the legacy video overlay attached to this crtc (if any)
 * before the crtc is disabled.
 */
1130 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
1133 (void) intel_overlay_switch_off(crtc->overlay);
1135 /* Let userspace switch the overlay on again. In most cases userspace
1136 * has to recompute where to put it anyway.
/*
 * Whether Display WA #0827 must be applied: NV12 planes are in use
 * on a display version 9 (Gen9) platform.
 */
1140 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1142 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1144 if (!crtc_state->nv12_planes)
1147 /* WA Display #0827: Gen9:all */
1148 if (DISPLAY_VER(dev_priv) == 9)
/*
 * Whether Wa_2006604312 (scaler clock gating) must be applied:
 * at least one scaler user on a display version 11 (ICL/EHL) platform.
 */
1154 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1156 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1158 /* Wa_2006604312:icl,ehl */
1159 if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
/*
 * Whether Wa_1604331009 (cursor clock gating) must be applied:
 * HDR mode with the cursor plane active on display version 11.
 */
1165 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1167 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1169 /* Wa_1604331009:icl,jsl,ehl */
1170 if (is_hdr_mode(crtc_state) &&
1171 crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1172 DISPLAY_VER(dev_priv) == 11)
/*
 * Apply/remove the async-flip VT-d workaround: clamp the plane stretch
 * max to x1 while async flips are enabled on the pipe, restore the
 * default (x8) otherwise.  Register layout differs between display
 * version 9 (SKL_*) and HSW/BDW (HSW_*).
 */
1178 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
1179 enum pipe pipe, bool enable)
1181 if (DISPLAY_VER(i915) == 9) {
1183 * "Plane N strech max must be programmed to 11b (x1)
1184 * when Async flips are enabled on that plane."
1186 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1187 SKL_PLANE1_STRETCH_MAX_MASK,
1188 enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1190 /* Also needed on HSW/BDW albeit undocumented */
1191 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1192 HSW_PRI_STRETCH_MAX_MASK,
1193 enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
/*
 * Whether the async-flip VT-d workaround is needed for this state:
 * async flip requested, VT-d/IOMMU active, and the platform is
 * display version 9, BDW or HSW.
 */
1197 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1199 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1201 return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
1202 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
/*
 * True when planes go from none-active (or a full modeset) to some
 * active in the new state, i.e. planes are being enabled.
 */
1205 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1206 const struct intel_crtc_state *new_crtc_state)
1208 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1209 new_crtc_state->active_planes;
/*
 * True when planes go from some active to none (or a full modeset),
 * i.e. planes are being disabled.  Mirror of planes_enabling().
 */
1212 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1213 const struct intel_crtc_state *new_crtc_state)
1215 return old_crtc_state->active_planes &&
1216 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
/*
 * Per-crtc cleanup after the plane update has been committed: flush
 * frontbuffer tracking, do post-update watermark/IPS/FBC work, turn
 * off workarounds the new state no longer needs (each old&&!new
 * check below), and re-activate DRRS.
 */
1219 static void intel_post_plane_update(struct intel_atomic_state *state,
1220 struct intel_crtc *crtc)
1222 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1223 const struct intel_crtc_state *old_crtc_state =
1224 intel_atomic_get_old_crtc_state(state, crtc);
1225 const struct intel_crtc_state *new_crtc_state =
1226 intel_atomic_get_new_crtc_state(state, crtc);
1227 enum pipe pipe = crtc->pipe;
1229 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
1231 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
1232 intel_update_watermarks(dev_priv);
1234 hsw_ips_post_update(state, crtc);
1235 intel_fbc_post_update(state, crtc);
/* Disable workarounds that were needed by the old state but not the new. */
1237 if (needs_async_flip_vtd_wa(old_crtc_state) &&
1238 !needs_async_flip_vtd_wa(new_crtc_state))
1239 intel_async_flip_vtd_wa(dev_priv, pipe, false);
1241 if (needs_nv12_wa(old_crtc_state) &&
1242 !needs_nv12_wa(new_crtc_state))
1243 skl_wa_827(dev_priv, pipe, false);
1245 if (needs_scalerclk_wa(old_crtc_state) &&
1246 !needs_scalerclk_wa(new_crtc_state))
1247 icl_wa_scalerclkgating(dev_priv, pipe, false);
1249 if (needs_cursorclk_wa(old_crtc_state) &&
1250 !needs_cursorclk_wa(new_crtc_state))
1251 icl_wa_cursorclkgating(dev_priv, pipe, false);
1253 intel_drrs_activate(new_crtc_state);
/*
 * Enable flip-done interrupt handling on every plane of this crtc
 * that is part of the update (update_planes bitmask).
 */
1256 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1257 struct intel_crtc *crtc)
1259 const struct intel_crtc_state *crtc_state =
1260 intel_atomic_get_new_crtc_state(state, crtc);
1261 u8 update_planes = crtc_state->update_planes;
1262 const struct intel_plane_state *plane_state;
1263 struct intel_plane *plane;
1266 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1267 if (plane->pipe == crtc->pipe &&
1268 update_planes & BIT(plane->id))
1269 plane->enable_flip_done(plane);
/*
 * Counterpart of intel_crtc_enable_flip_done(): disable flip-done
 * handling on every updated plane of this crtc.
 */
1273 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1274 struct intel_crtc *crtc)
1276 const struct intel_crtc_state *crtc_state =
1277 intel_atomic_get_new_crtc_state(state, crtc);
1278 u8 update_planes = crtc_state->update_planes;
1279 const struct intel_plane_state *plane_state;
1280 struct intel_plane *plane;
1283 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1284 if (plane->pipe == crtc->pipe &&
1285 update_planes & BIT(plane->id))
1286 plane->disable_flip_done(plane);
/*
 * WA for planes whose async-flip enable bit is double buffered and
 * latched only at vblank start: re-arm each affected plane with its
 * old state but the async flip bit cleared, then wait one vblank so
 * the change actually latches before proceeding.
 */
1290 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1291 struct intel_crtc *crtc)
1293 const struct intel_crtc_state *old_crtc_state =
1294 intel_atomic_get_old_crtc_state(state, crtc);
1295 const struct intel_crtc_state *new_crtc_state =
1296 intel_atomic_get_new_crtc_state(state, crtc);
1297 u8 update_planes = new_crtc_state->update_planes;
1298 const struct intel_plane_state *old_plane_state;
1299 struct intel_plane *plane;
1300 bool need_vbl_wait = false;
1303 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1304 if (plane->need_async_flip_disable_wa &&
1305 plane->pipe == crtc->pipe &&
1306 update_planes & BIT(plane->id)) {
1308 * Apart from the async flip bit we want to
1309 * preserve the old state for the plane.
1311 plane->async_flip(plane, old_crtc_state,
1312 old_plane_state, false);
1313 need_vbl_wait = true;
1318 intel_crtc_wait_for_next_vblank(crtc);
/*
 * Per-crtc preparation before planes are committed: deactivate DRRS,
 * run PSR/IPS/FBC pre-update hooks (waiting a vblank where those
 * require it), enable workarounds that the new state newly needs
 * (each !old&&new check below), disable self-refresh / LP watermarks
 * ahead of plane changes, program intermediate watermarks for
 * non-modeset updates, and apply the async-flip-disable WA.
 */
1321 static void intel_pre_plane_update(struct intel_atomic_state *state,
1322 struct intel_crtc *crtc)
1324 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1325 const struct intel_crtc_state *old_crtc_state =
1326 intel_atomic_get_old_crtc_state(state, crtc);
1327 const struct intel_crtc_state *new_crtc_state =
1328 intel_atomic_get_new_crtc_state(state, crtc);
1329 enum pipe pipe = crtc->pipe;
1331 intel_drrs_deactivate(old_crtc_state);
1333 intel_psr_pre_plane_update(state, crtc);
1335 if (hsw_ips_pre_update(state, crtc))
1336 intel_crtc_wait_for_next_vblank(crtc);
1338 if (intel_fbc_pre_update(state, crtc))
1339 intel_crtc_wait_for_next_vblank(crtc);
/* Enable workarounds that the new state needs but the old one didn't. */
1341 if (!needs_async_flip_vtd_wa(old_crtc_state) &&
1342 needs_async_flip_vtd_wa(new_crtc_state))
1343 intel_async_flip_vtd_wa(dev_priv, pipe, true);
1345 /* Display WA 827 */
1346 if (!needs_nv12_wa(old_crtc_state) &&
1347 needs_nv12_wa(new_crtc_state))
1348 skl_wa_827(dev_priv, pipe, true);
1350 /* Wa_2006604312:icl,ehl */
1351 if (!needs_scalerclk_wa(old_crtc_state) &&
1352 needs_scalerclk_wa(new_crtc_state))
1353 icl_wa_scalerclkgating(dev_priv, pipe, true);
1355 /* Wa_1604331009:icl,jsl,ehl */
1356 if (!needs_cursorclk_wa(old_crtc_state) &&
1357 needs_cursorclk_wa(new_crtc_state))
1358 icl_wa_cursorclkgating(dev_priv, pipe, true);
1361 * Vblank time updates from the shadow to live plane control register
1362 * are blocked if the memory self-refresh mode is active at that
1363 * moment. So to make sure the plane gets truly disabled, disable
1364 * first the self-refresh mode. The self-refresh enable bit in turn
1365 * will be checked/applied by the HW only at the next frame start
1366 * event which is after the vblank start event, so we need to have a
1367 * wait-for-vblank between disabling the plane and the pipe.
1369 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
1370 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
1371 intel_crtc_wait_for_next_vblank(crtc);
1374 * IVB workaround: must disable low power watermarks for at least
1375 * one frame before enabling scaling. LP watermarks can be re-enabled
1376 * when scaling is disabled.
1378 * WaCxSRDisabledForSpriteScaling:ivb
1380 if (old_crtc_state->hw.active &&
1381 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
1382 intel_crtc_wait_for_next_vblank(crtc);
1385 * If we're doing a modeset we don't need to do any
1386 * pre-vblank watermark programming here.
1388 if (!intel_crtc_needs_modeset(new_crtc_state)) {
1390 * For platforms that support atomic watermarks, program the
1391 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
1392 * will be the intermediate values that are safe for both pre- and
1393 * post- vblank; when vblank happens, the 'active' values will be set
1394 * to the final 'target' values and we'll do this again to get the
1395 * optimal watermarks. For gen9+ platforms, the values we program here
1396 * will be the final target values which will get automatically latched
1397 * at vblank time; no further programming will be necessary.
1399 * If a platform hasn't been transitioned to atomic watermarks yet,
1400 * we'll continue to update watermarks the old way, if flags tell
1403 if (!intel_initial_watermarks(state, crtc))
1404 if (new_crtc_state->update_wm_pre)
1405 intel_update_watermarks(dev_priv);
1409 * Gen2 reports pipe underruns whenever all planes are disabled.
1410 * So disable underrun reporting before all the planes get disabled.
1412 * We do this after .initial_watermarks() so that we have a
1413 * chance of catching underruns with the intermediate watermarks
1414 * vs. the old plane configuration.
1416 if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
1417 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1420 * WA for platforms where async address update enable bit
1421 * is double buffered and only latched at start of vblank.
1423 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
1424 intel_crtc_async_flip_disable_wa(state, crtc);
/*
 * Disarm every plane of this crtc that is part of the update mask,
 * after switching off the legacy overlay, and flush frontbuffer
 * tracking for the planes that were visible.
 */
1427 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1428 struct intel_crtc *crtc)
1430 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1431 const struct intel_crtc_state *new_crtc_state =
1432 intel_atomic_get_new_crtc_state(state, crtc);
1433 unsigned int update_mask = new_crtc_state->update_planes;
1434 const struct intel_plane_state *old_plane_state;
1435 struct intel_plane *plane;
1436 unsigned fb_bits = 0;
1439 intel_crtc_dpms_overlay_disable(crtc);
1441 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1442 if (crtc->pipe != plane->pipe ||
1443 !(update_mask & BIT(plane->id)))
1446 intel_plane_disable_arm(plane, new_crtc_state);
1448 if (old_plane_state->uapi.visible)
1449 fb_bits |= plane->frontbuffer_bit;
1452 intel_frontbuffer_flip(dev_priv, fb_bits);
1456 * intel_connector_primary_encoder - get the primary encoder for a connector
1457 * @connector: connector for which to return the encoder
1459 * Returns the primary encoder for a connector. There is a 1:1 mapping from
1460 * all connectors to their encoder, except for DP-MST connectors which have
1461 * both a virtual and a primary encoder. These DP-MST primary encoders can be
1462 * pointed to by as many DP-MST connectors as there are pipes.
1464 static struct intel_encoder *
1465 intel_connector_primary_encoder(struct intel_connector *connector)
1467 struct intel_encoder *encoder;
/* DP-MST connectors resolve to their digital port's encoder. */
1469 if (connector->mst_port)
1470 return &dp_to_dig_port(connector->mst_port)->base;
1472 encoder = intel_attached_encoder(connector);
1473 drm_WARN_ON(connector->base.dev, !encoder);
/*
 * Run every primary encoder's ->update_prepare() hook for connectors
 * changing in this commit.  Also carries the fastset DPLL state over
 * from the old crtc state for non-modeset crtcs (TypeC fastsets after
 * non-blocking commits).  Only runs the hooks when state->modeset.
 */
1478 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1480 struct drm_i915_private *i915 = to_i915(state->base.dev);
1481 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1482 struct intel_crtc *crtc;
1483 struct drm_connector_state *new_conn_state;
1484 struct drm_connector *connector;
1488 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1489 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1491 if (i915->dpll.mgr) {
1492 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1493 if (intel_crtc_needs_modeset(new_crtc_state))
1496 new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1497 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1501 if (!state->modeset)
1504 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1506 struct intel_connector *intel_connector;
1507 struct intel_encoder *encoder;
1508 struct intel_crtc *crtc;
1510 if (!intel_connector_needs_modeset(state, connector))
1513 intel_connector = to_intel_connector(connector);
1514 encoder = intel_connector_primary_encoder(intel_connector);
1515 if (!encoder->update_prepare)
1518 crtc = new_conn_state->crtc ?
1519 to_intel_crtc(new_conn_state->crtc) : NULL;
1520 encoder->update_prepare(state, encoder, crtc)
/*
 * Counterpart of intel_encoders_update_prepare(): run every primary
 * encoder's ->update_complete() hook for connectors changing in this
 * commit.  No-op unless state->modeset.
 */
1524 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1526 struct drm_connector_state *new_conn_state;
1527 struct drm_connector *connector;
1530 if (!state->modeset)
1533 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1535 struct intel_connector *intel_connector;
1536 struct intel_encoder *encoder;
1537 struct intel_crtc *crtc;
1539 if (!intel_connector_needs_modeset(state, connector))
1542 intel_connector = to_intel_connector(connector);
1543 encoder = intel_connector_primary_encoder(intel_connector);
1544 if (!encoder->update_complete)
1547 crtc = new_conn_state->crtc ?
1548 to_intel_crtc(new_conn_state->crtc) : NULL;
1549 encoder->update_complete(state, encoder, crtc);
/*
 * Call the optional ->pre_pll_enable() hook of every encoder whose
 * connector is assigned to @crtc in the new state.
 */
1553 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1554 struct intel_crtc *crtc)
1556 const struct intel_crtc_state *crtc_state =
1557 intel_atomic_get_new_crtc_state(state, crtc);
1558 const struct drm_connector_state *conn_state;
1559 struct drm_connector *conn;
1562 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1563 struct intel_encoder *encoder =
1564 to_intel_encoder(conn_state->best_encoder);
1566 if (conn_state->crtc != &crtc->base)
1569 if (encoder->pre_pll_enable)
1570 encoder->pre_pll_enable(state, encoder,
1571 crtc_state, conn_state);
/*
 * Call the optional ->pre_enable() hook of every encoder whose
 * connector is assigned to @crtc in the new state.
 */
1575 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1576 struct intel_crtc *crtc)
1578 const struct intel_crtc_state *crtc_state =
1579 intel_atomic_get_new_crtc_state(state, crtc);
1580 const struct drm_connector_state *conn_state;
1581 struct drm_connector *conn;
1584 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1585 struct intel_encoder *encoder =
1586 to_intel_encoder(conn_state->best_encoder);
1588 if (conn_state->crtc != &crtc->base)
1591 if (encoder->pre_enable)
1592 encoder->pre_enable(state, encoder,
1593 crtc_state, conn_state);
/*
 * Call the optional ->enable() hook of every encoder assigned to
 * @crtc in the new state, then notify OpRegion/ACPI that the
 * encoder is on.
 */
1597 static void intel_encoders_enable(struct intel_atomic_state *state,
1598 struct intel_crtc *crtc)
1600 const struct intel_crtc_state *crtc_state =
1601 intel_atomic_get_new_crtc_state(state, crtc);
1602 const struct drm_connector_state *conn_state;
1603 struct drm_connector *conn;
1606 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1607 struct intel_encoder *encoder =
1608 to_intel_encoder(conn_state->best_encoder);
1610 if (conn_state->crtc != &crtc->base)
1613 if (encoder->enable)
1614 encoder->enable(state, encoder,
1615 crtc_state, conn_state);
1616 intel_opregion_notify_encoder(encoder, true);
/*
 * Notify OpRegion/ACPI that each encoder assigned to @crtc in the
 * OLD state is going off, then call its optional ->disable() hook.
 * Iterates the old connector states, mirroring intel_encoders_enable().
 */
1620 static void intel_encoders_disable(struct intel_atomic_state *state,
1621 struct intel_crtc *crtc)
1623 const struct intel_crtc_state *old_crtc_state =
1624 intel_atomic_get_old_crtc_state(state, crtc);
1625 const struct drm_connector_state *old_conn_state;
1626 struct drm_connector *conn;
1629 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1630 struct intel_encoder *encoder =
1631 to_intel_encoder(old_conn_state->best_encoder);
1633 if (old_conn_state->crtc != &crtc->base)
1636 intel_opregion_notify_encoder(encoder, false);
1637 if (encoder->disable)
1638 encoder->disable(state, encoder,
1639 old_crtc_state, old_conn_state);
/*
 * Call the optional ->post_disable() hook of every encoder assigned
 * to @crtc in the old state.
 */
1643 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1644 struct intel_crtc *crtc)
1646 const struct intel_crtc_state *old_crtc_state =
1647 intel_atomic_get_old_crtc_state(state, crtc);
1648 const struct drm_connector_state *old_conn_state;
1649 struct drm_connector *conn;
1652 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1653 struct intel_encoder *encoder =
1654 to_intel_encoder(old_conn_state->best_encoder);
1656 if (old_conn_state->crtc != &crtc->base)
1659 if (encoder->post_disable)
1660 encoder->post_disable(state, encoder,
1661 old_crtc_state, old_conn_state);
/*
 * Call the optional ->post_pll_disable() hook of every encoder
 * assigned to @crtc in the old state (runs after the PLL is off).
 */
1665 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1666 struct intel_crtc *crtc)
1668 const struct intel_crtc_state *old_crtc_state =
1669 intel_atomic_get_old_crtc_state(state, crtc);
1670 const struct drm_connector_state *old_conn_state;
1671 struct drm_connector *conn;
1674 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1675 struct intel_encoder *encoder =
1676 to_intel_encoder(old_conn_state->best_encoder);
1678 if (old_conn_state->crtc != &crtc->base)
1681 if (encoder->post_pll_disable)
1682 encoder->post_pll_disable(state, encoder,
1683 old_crtc_state, old_conn_state);
/*
 * Call the optional ->update_pipe() hook of every encoder assigned
 * to @crtc in the new state (fastset path, no full modeset).
 */
1687 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1688 struct intel_crtc *crtc)
1690 const struct intel_crtc_state *crtc_state =
1691 intel_atomic_get_new_crtc_state(state, crtc);
1692 const struct drm_connector_state *conn_state;
1693 struct drm_connector *conn;
1696 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1697 struct intel_encoder *encoder =
1698 to_intel_encoder(conn_state->best_encoder);
1700 if (conn_state->crtc != &crtc->base)
1703 if (encoder->update_pipe)
1704 encoder->update_pipe(state, encoder,
1705 crtc_state, conn_state);
/*
 * Disarm the crtc's primary plane.  Used by the crtc enable paths to
 * latch gamma/bottom-color configuration via DSPCNTR while keeping
 * the plane itself off.
 */
1709 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1711 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1712 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1714 plane->disable_arm(plane, crtc_state);
/*
 * Program the ILK-style CPU transcoder: M/N values (FDI link M1/N1
 * for PCH encoders, DP M1/N1 + M2/N2 otherwise), the transcoder
 * timings, and PIPECONF.
 */
1717 static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1719 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1720 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1722 if (crtc_state->has_pch_encoder) {
1723 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1724 &crtc_state->fdi_m_n);
1725 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1726 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1727 &crtc_state->dp_m_n);
1728 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1729 &crtc_state->dp_m2_n2);
1732 intel_set_transcoder_timings(crtc_state);
1734 ilk_set_pipeconf(crtc_state);
/*
 * Full crtc enable sequence for ILK-class (PCH) platforms: suppress
 * spurious underrun reporting, program the CPU transcoder and pipe
 * source size, run encoder pre-enable hooks, set up FDI/PCH when a
 * PCH encoder is present, enable the panel fitter, load the LUTs,
 * program watermarks, enable the transcoder and PCH, turn on vblanks
 * and encoders, then re-enable underrun reporting after the pipe has
 * settled.
 */
1737 static void ilk_crtc_enable(struct intel_atomic_state *state,
1738 struct intel_crtc *crtc)
1740 const struct intel_crtc_state *new_crtc_state =
1741 intel_atomic_get_new_crtc_state(state, crtc);
1742 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1743 enum pipe pipe = crtc->pipe;
1745 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1749 * Sometimes spurious CPU pipe underruns happen during FDI
1750 * training, at least with VGA+HDMI cloning. Suppress them.
1752 * On ILK we get an occasional spurious CPU pipe underruns
1753 * between eDP port A enable and vdd enable. Also PCH port
1754 * enable seems to result in the occasional CPU pipe underrun.
1756 * Spurious PCH underruns also occur during PCH enabling.
1758 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1759 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1761 ilk_configure_cpu_transcoder(new_crtc_state);
1763 intel_set_pipe_src_size(new_crtc_state);
1765 crtc->active = true;
1767 intel_encoders_pre_enable(state, crtc);
1769 if (new_crtc_state->has_pch_encoder) {
1770 ilk_pch_pre_enable(state, crtc);
/* No PCH encoder: FDI must be fully idle. */
1772 assert_fdi_tx_disabled(dev_priv, pipe);
1773 assert_fdi_rx_disabled(dev_priv, pipe);
1776 ilk_pfit_enable(new_crtc_state);
1779 * On ILK+ LUT must be loaded before the pipe is running but with
1782 intel_color_load_luts(new_crtc_state);
1783 intel_color_commit_noarm(new_crtc_state);
1784 intel_color_commit_arm(new_crtc_state);
1785 /* update DSPCNTR to configure gamma for pipe bottom color */
1786 intel_disable_primary_plane(new_crtc_state);
1788 intel_initial_watermarks(state, crtc);
1789 intel_enable_transcoder(new_crtc_state);
1791 if (new_crtc_state->has_pch_encoder)
1792 ilk_pch_enable(state, crtc);
1794 intel_crtc_vblank_on(new_crtc_state);
1796 intel_encoders_enable(state, crtc);
1798 if (HAS_PCH_CPT(dev_priv))
1799 cpt_verify_modeset(dev_priv, pipe);
1802 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1803 * And a second vblank wait is needed at least on ILK with
1804 * some interlaced HDMI modes. Let's do the double wait always
1805 * in case there are more corner cases we don't know about.
1807 if (new_crtc_state->has_pch_encoder) {
1808 intel_crtc_wait_for_next_vblank(crtc);
1809 intel_crtc_wait_for_next_vblank(crtc);
1811 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1812 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/*
 * Display WA #1180 (GLK): toggle the DPF/DPF-RAM/DPFR clock gating
 * disable bits in CLKGATE_DIS_PSL for this pipe while the scaler
 * workaround is required.
 */
1815 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1816 enum pipe pipe, bool apply)
1818 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1819 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1826 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
/*
 * Program the per-pipe line time watermark register (WM_LINETIME)
 * with the regular and IPS line times from the crtc state.
 */
1829 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1831 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1832 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1834 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1835 HSW_LINETIME(crtc_state->linetime) |
1836 HSW_IPS_LINETIME(crtc_state->ips_linetime));
/*
 * Program the frame start delay field in the transcoder's CHICKEN_TRANS
 * register.  The hardware field is zero-based, hence the "- 1".
 */
1839 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1841 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1842 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1843 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1846 val = intel_de_read(dev_priv, reg);
1847 val &= ~HSW_FRAME_START_DELAY_MASK;
1848 val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
1849 intel_de_write(dev_priv, reg, val);
/*
 * Bigjoiner pre-enable: when enabling a bigjoiner slave, run the
 * master crtc's encoder pre-pll-enable/pre-enable hooks around the
 * shared DPLL enable, so the master side is brought up as part of
 * the slave's enable sequence.
 */
1852 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1853 const struct intel_crtc_state *crtc_state)
1855 struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
1858 * Enable sequence steps 1-7 on bigjoiner master
1860 if (intel_crtc_is_bigjoiner_slave(crtc_state))
1861 intel_encoders_pre_pll_enable(state, master_crtc);
1863 if (crtc_state->shared_dpll)
1864 intel_enable_shared_dpll(crtc_state);
1866 if (intel_crtc_is_bigjoiner_slave(crtc_state))
1867 intel_encoders_pre_enable(state, master_crtc);
/*
 * Program the HSW+ CPU transcoder: M/N values (FDI for PCH encoders,
 * DP M1/N1 + M2/N2 otherwise), timings, pixel multiplier (not
 * applicable on the EDP transcoder), frame start delay, and TRANSCONF.
 */
1870 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1872 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1873 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1874 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1876 if (crtc_state->has_pch_encoder) {
1877 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1878 &crtc_state->fdi_m_n);
1879 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1880 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1881 &crtc_state->dp_m_n);
1882 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1883 &crtc_state->dp_m2_n2);
1886 intel_set_transcoder_timings(crtc_state);
/* PIPE_MULT field is zero-based. */
1888 if (cpu_transcoder != TRANSCODER_EDP)
1889 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
1890 crtc_state->pixel_multiplier - 1);
1892 hsw_set_frame_start_delay(crtc_state);
1894 hsw_set_transconf(crtc_state);
/*
 * Full crtc enable sequence for HSW+ (DDI) platforms: encoder
 * pre-pll/pre-enable hooks (bigjoiner-aware), DSC and uncompressed
 * joiner setup, pipe source size and PIPEMISC, CPU transcoder
 * programming, GLK scaler clock gating WA, panel fitter, LUTs,
 * linetime watermarks, pipe chicken bits, initial watermarks,
 * vblank/encoder enable, and finally the HSW pipe-enable-order
 * workaround (double vblank wait on the workaround pipe).
 */
1897 static void hsw_crtc_enable(struct intel_atomic_state *state,
1898 struct intel_crtc *crtc)
1900 const struct intel_crtc_state *new_crtc_state =
1901 intel_atomic_get_new_crtc_state(state, crtc);
1902 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1903 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
1904 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1905 bool psl_clkgate_wa;
1907 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
/* Bigjoiner crtcs run the encoder hooks via the bigjoiner helper. */
1910 if (!new_crtc_state->bigjoiner_pipes) {
1911 intel_encoders_pre_pll_enable(state, crtc);
1913 if (new_crtc_state->shared_dpll)
1914 intel_enable_shared_dpll(new_crtc_state);
1916 intel_encoders_pre_enable(state, crtc);
1918 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
1921 intel_dsc_enable(new_crtc_state);
1923 if (DISPLAY_VER(dev_priv) >= 13)
1924 intel_uncompressed_joiner_enable(new_crtc_state);
1926 intel_set_pipe_src_size(new_crtc_state);
1927 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1928 bdw_set_pipemisc(new_crtc_state);
1930 if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
1931 !transcoder_is_dsi(cpu_transcoder))
1932 hsw_configure_cpu_transcoder(new_crtc_state);
1934 crtc->active = true;
1936 /* Display WA #1180: WaDisableScalarClockGating: glk */
1937 psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
1938 new_crtc_state->pch_pfit.enabled;
1940 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
1942 if (DISPLAY_VER(dev_priv) >= 9)
1943 skl_pfit_enable(new_crtc_state);
1945 ilk_pfit_enable(new_crtc_state);
1948 * On ILK+ LUT must be loaded before the pipe is running but with
1951 intel_color_load_luts(new_crtc_state);
1952 intel_color_commit_noarm(new_crtc_state);
1953 intel_color_commit_arm(new_crtc_state);
1954 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
1955 if (DISPLAY_VER(dev_priv) < 9)
1956 intel_disable_primary_plane(new_crtc_state);
1958 hsw_set_linetime_wm(new_crtc_state);
1960 if (DISPLAY_VER(dev_priv) >= 11)
1961 icl_set_pipe_chicken(new_crtc_state);
1963 intel_initial_watermarks(state, crtc);
1965 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
1966 intel_crtc_vblank_on(new_crtc_state);
1968 intel_encoders_enable(state, crtc);
1970 if (psl_clkgate_wa) {
1971 intel_crtc_wait_for_next_vblank(crtc);
1972 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
1975 /* If we change the relative order between pipe/planes enabling, we need
1976 * to change the workaround. */
1977 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
1978 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
1979 struct intel_crtc *wa_crtc;
1981 wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
1983 intel_crtc_wait_for_next_vblank(wa_crtc);
1984 intel_crtc_wait_for_next_vblank(wa_crtc);
/*
 * Disable the PCH panel fitter for this pipe by clearing PF_CTL and
 * the window position/size registers.  Skipped when the fitter was
 * never enabled (see comment below re: HSW power well).
 */
1988 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
1990 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1991 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1992 enum pipe pipe = crtc->pipe;
1994 /* To avoid upsetting the power well on haswell only disable the pfit if
1995 * it's in use. The hw state code will make sure we get this right. */
1996 if (!old_crtc_state->pch_pfit.enabled)
1999 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
2000 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
2001 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
/*
 * Full crtc disable sequence for ILK-class platforms: suppress
 * spurious underrun reporting, run encoder disable hooks, stop
 * vblanks and the transcoder, tear down the panel fitter and PCH,
 * then restore underrun reporting.
 */
2004 static void ilk_crtc_disable(struct intel_atomic_state *state,
2005 struct intel_crtc *crtc)
2007 const struct intel_crtc_state *old_crtc_state =
2008 intel_atomic_get_old_crtc_state(state, crtc);
2009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2010 enum pipe pipe = crtc->pipe;
2013 * Sometimes spurious CPU pipe underruns happen when the
2014 * pipe is already disabled, but FDI RX/TX is still enabled.
2015 * Happens at least with VGA+HDMI cloning. Suppress them.
2017 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2018 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2020 intel_encoders_disable(state, crtc);
2022 intel_crtc_vblank_off(old_crtc_state);
2024 intel_disable_transcoder(old_crtc_state);
2026 ilk_pfit_disable(old_crtc_state);
2028 if (old_crtc_state->has_pch_encoder)
2029 ilk_pch_disable(state, crtc);
2031 intel_encoders_post_disable(state, crtc);
2033 if (old_crtc_state->has_pch_encoder)
2034 ilk_pch_post_disable(state, crtc);
2036 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2037 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/*
 * Crtc disable for HSW+ (DDI): only run the encoder disable and
 * post-disable hooks, and only for the bigjoiner master (the slave's
 * encoders are handled via the master).
 */
2040 static void hsw_crtc_disable(struct intel_atomic_state *state,
2041 struct intel_crtc *crtc)
2043 const struct intel_crtc_state *old_crtc_state =
2044 intel_atomic_get_old_crtc_state(state, crtc);
2047 * FIXME collapse everything to one hook.
2048 * Need care with mst->ddi interactions.
2050 if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
2051 intel_encoders_disable(state, crtc);
2052 intel_encoders_post_disable(state, crtc);
/*
 * Enable the GMCH panel fitter: program the ratios and control value
 * from the crtc state, plus the border color register.  Must run
 * while the pipe/transcoder is still disabled (asserted below).
 */
2056 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2058 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2059 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2061 if (!crtc_state->gmch_pfit.control)
2065 * The panel fitter should only be adjusted whilst the pipe is disabled,
2066 * according to register description and PRM.
2068 drm_WARN_ON(&dev_priv->drm,
2069 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
2070 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2072 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
2073 crtc_state->gmch_pfit.pgm_ratios);
2074 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
2076 /* Border color in case we don't scale up to the full screen. Black by
2077 * default, change to something else for debugging. */
2078 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
/*
 * Whether @phy is a combo PHY on this platform.  The upper bound of
 * the combo PHY range varies per platform generation; DG2 never has
 * combo PHYs despite bspec naming (see comment below).
 */
2081 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2083 if (phy == PHY_NONE)
2085 else if (IS_DG2(dev_priv))
2087 * DG2 outputs labelled as "combo PHY" in the bspec use
2088 * SNPS PHYs with completely different programming,
2089 * hence we always return false here.
2092 else if (IS_ALDERLAKE_S(dev_priv))
2093 return phy <= PHY_E;
2094 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2095 return phy <= PHY_D;
2096 else if (IS_JSL_EHL(dev_priv))
2097 return phy <= PHY_C;
2098 else if (DISPLAY_VER(dev_priv) >= 11)
2099 return phy <= PHY_B;
/*
 * Whether @phy is a Type-C PHY on this platform.  The TC PHY range
 * differs per platform; DG2's "TC1" is actually a SNPS PHY.
 */
2104 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2106 if (IS_DG2(dev_priv))
2107 /* DG2's "TC1" output uses a SNPS PHY */
2109 else if (IS_ALDERLAKE_P(dev_priv))
2110 return phy >= PHY_F && phy <= PHY_I;
2111 else if (IS_TIGERLAKE(dev_priv))
2112 return phy >= PHY_D && phy <= PHY_I;
2113 else if (IS_ICELAKE(dev_priv))
2114 return phy >= PHY_C && phy <= PHY_F;
/*
 * Whether @phy is a Synopsys (SNPS) PHY — only DG2 uses them
 * (PHY A through E).
 */
2119 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2121 if (phy == PHY_NONE)
2123 else if (IS_DG2(dev_priv))
2125 * All four "combo" ports and the TC1 port (PHY E) use
2128 return phy <= PHY_E;
/*
 * Map a DDI port to its PHY.  The port->phy offset depends on the
 * platform's port naming: XELPD D+, display 13 TC, ADL-S, DG1/RKL
 * and JSL/EHL all remap; everything else is the identity A-based
 * mapping at the end.
 */
2133 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2135 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2136 return PHY_D + port - PORT_D_XELPD;
2137 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2138 return PHY_F + port - PORT_TC1;
2139 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2140 return PHY_B + port - PORT_TC1;
2141 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2142 return PHY_C + port - PORT_TC1;
2143 else if (IS_JSL_EHL(i915) && port == PORT_D)
2146 return PHY_A + port - PORT_A;
/*
 * Map a DDI port to its Type-C port index, or TC_PORT_NONE when the
 * port's PHY is not a TC PHY.  The TC port base name changed at
 * display version 12 (PORT_TC1 vs legacy PORT_C).
 */
2149 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2151 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2152 return TC_PORT_NONE;
2154 if (DISPLAY_VER(dev_priv) >= 12)
2155 return TC_PORT_1 + port - PORT_TC1;
2157 return TC_PORT_1 + port - PORT_C;
/*
 * Map a port to its DDI-lanes power domain (one POWER_DOMAIN_PORT_
 * DDI_*_LANES entry per port A..I), falling back to
 * POWER_DOMAIN_PORT_OTHER for unknown ports.
 */
2160 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
2164 return POWER_DOMAIN_PORT_DDI_A_LANES;
2166 return POWER_DOMAIN_PORT_DDI_B_LANES;
2168 return POWER_DOMAIN_PORT_DDI_C_LANES;
2170 return POWER_DOMAIN_PORT_DDI_D_LANES;
2172 return POWER_DOMAIN_PORT_DDI_E_LANES;
2174 return POWER_DOMAIN_PORT_DDI_F_LANES;
2176 return POWER_DOMAIN_PORT_DDI_G_LANES;
2178 return POWER_DOMAIN_PORT_DDI_H_LANES;
2180 return POWER_DOMAIN_PORT_DDI_I_LANES;
2183 return POWER_DOMAIN_PORT_OTHER;
/*
 * intel_aux_power_domain - pick the AUX power domain for @dig_port.
 * In TBT alt mode the TBT-specific AUX domains are used (switch on
 * dig_port->aux_ch; case labels are missing in this snapshot), otherwise
 * the legacy mapping helper is used.
 */
2187 enum intel_display_power_domain
2188 intel_aux_power_domain(struct intel_digital_port *dig_port)
2190 if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
2191 switch (dig_port->aux_ch) {
2193 return POWER_DOMAIN_AUX_C_TBT;
2195 return POWER_DOMAIN_AUX_D_TBT;
2197 return POWER_DOMAIN_AUX_E_TBT;
2199 return POWER_DOMAIN_AUX_F_TBT;
2201 return POWER_DOMAIN_AUX_G_TBT;
2203 return POWER_DOMAIN_AUX_H_TBT;
2205 return POWER_DOMAIN_AUX_I_TBT;
/* unexpected AUX channel in TBT mode: warn and fall back to AUX_C_TBT */
2207 MISSING_CASE(dig_port->aux_ch);
2208 return POWER_DOMAIN_AUX_C_TBT;
2212 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
2216 * Converts aux_ch to power_domain without caring about TBT ports for that use
2217 * intel_aux_power_domain()
/*
 * intel_legacy_aux_to_power_domain - map an AUX channel to its non-TBT
 * power domain (switch on @aux_ch; case labels missing in this snapshot,
 * returns are in A..I order). Unknown channels warn via MISSING_CASE()
 * and fall back to POWER_DOMAIN_AUX_A.
 */
2219 enum intel_display_power_domain
2220 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
2224 return POWER_DOMAIN_AUX_A;
2226 return POWER_DOMAIN_AUX_B;
2228 return POWER_DOMAIN_AUX_C;
2230 return POWER_DOMAIN_AUX_D;
2232 return POWER_DOMAIN_AUX_E;
2234 return POWER_DOMAIN_AUX_F;
2236 return POWER_DOMAIN_AUX_G;
2238 return POWER_DOMAIN_AUX_H;
2240 return POWER_DOMAIN_AUX_I;
2242 MISSING_CASE(aux_ch);
2243 return POWER_DOMAIN_AUX_A;
/*
 * get_crtc_power_domains - build the bitmask of power domains a CRTC
 * state needs: pipe + transcoder always; panel fitter, per-encoder
 * domains, audio MMIO, display core (when a shared DPLL is used) and DSC
 * domains conditionally. Returns 0 for an inactive CRTC.
 * NOTE(review): the early-return and final return lines fall in
 * extraction gaps in this snapshot.
 */
2247 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2249 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2250 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2251 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2252 struct drm_encoder *encoder;
2253 enum pipe pipe = crtc->pipe;
2256 if (!crtc_state->hw.active)
2259 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
2260 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
2261 if (crtc_state->pch_pfit.enabled ||
2262 crtc_state->pch_pfit.force_thru)
2263 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
/* accumulate the power domain of every encoder driven by this CRTC */
2265 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2266 crtc_state->uapi.encoder_mask) {
2267 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2269 mask |= BIT_ULL(intel_encoder->power_domain);
2272 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2273 mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
2275 if (crtc_state->shared_dpll)
2276 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
2278 if (crtc_state->dsc.compression_enable)
2279 mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
2285 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2287 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2288 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2289 enum intel_display_power_domain domain;
2290 u64 domains, new_domains, old_domains;
2292 domains = get_crtc_power_domains(crtc_state);
2294 new_domains = domains & ~crtc->enabled_power_domains.mask;
2295 old_domains = crtc->enabled_power_domains.mask & ~domains;
2297 for_each_power_domain(domain, new_domains)
2298 intel_display_power_get_in_set(dev_priv,
2299 &crtc->enabled_power_domains,
/*
 * modeset_put_crtc_power_domains - drop the given set of power-domain
 * references held in crtc->enabled_power_domains.
 */
2305 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2308 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2309 &crtc->enabled_power_domains,
/*
 * i9xx_configure_cpu_transcoder - program the CPU transcoder for a gmch
 * platform: DP M/N values (when a DP encoder is attached), transcoder
 * timings, and PIPECONF.
 */
2313 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
2315 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2316 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2318 if (intel_crtc_has_dp_encoder(crtc_state)) {
2319 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
2320 &crtc_state->dp_m_n);
2321 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
2322 &crtc_state->dp_m2_n2);
2325 intel_set_transcoder_timings(crtc_state);
2327 i9xx_set_pipeconf(crtc_state);
/*
 * valleyview_crtc_enable - VLV/CHV CRTC enable sequence: configure the
 * transcoder and pipe source size, enable the PLL (CHV or VLV variant),
 * run encoder pre-enable hooks, program pfit/LUTs, then enable the
 * transcoder, vblank and encoders — in hardware-required order.
 */
2330 static void valleyview_crtc_enable(struct intel_atomic_state *state,
2331 struct intel_crtc *crtc)
2333 const struct intel_crtc_state *new_crtc_state =
2334 intel_atomic_get_new_crtc_state(state, crtc);
2335 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2336 enum pipe pipe = crtc->pipe;
/* enabling an already-active CRTC indicates a driver state bug */
2338 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2341 i9xx_configure_cpu_transcoder(new_crtc_state);
2343 intel_set_pipe_src_size(new_crtc_state);
/* CHV pipe B: select legacy blending and zero the background canvas */
2345 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2346 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2347 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2350 crtc->active = true;
2352 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2354 intel_encoders_pre_pll_enable(state, crtc);
2356 if (IS_CHERRYVIEW(dev_priv))
2357 chv_enable_pll(new_crtc_state);
2359 vlv_enable_pll(new_crtc_state);
2361 intel_encoders_pre_enable(state, crtc);
2363 i9xx_pfit_enable(new_crtc_state);
2365 intel_color_load_luts(new_crtc_state);
2366 intel_color_commit_noarm(new_crtc_state);
2367 intel_color_commit_arm(new_crtc_state);
2368 /* update DSPCNTR to configure gamma for pipe bottom color */
2369 intel_disable_primary_plane(new_crtc_state);
2371 intel_initial_watermarks(state, crtc);
2372 intel_enable_transcoder(new_crtc_state);
2374 intel_crtc_vblank_on(new_crtc_state);
2376 intel_encoders_enable(state, crtc);
/*
 * i9xx_crtc_enable - CRTC enable sequence for pre-VLV gmch platforms.
 * Mirrors valleyview_crtc_enable() but with the i9xx PLL and extra gen2
 * quirks (no FIFO underrun reporting, post-enable vblank wait).
 */
2379 static void i9xx_crtc_enable(struct intel_atomic_state *state,
2380 struct intel_crtc *crtc)
2382 const struct intel_crtc_state *new_crtc_state =
2383 intel_atomic_get_new_crtc_state(state, crtc);
2384 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2385 enum pipe pipe = crtc->pipe;
2387 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2390 i9xx_configure_cpu_transcoder(new_crtc_state);
2392 intel_set_pipe_src_size(new_crtc_state);
2394 crtc->active = true;
/* gen2 has no CPU FIFO underrun reporting */
2396 if (DISPLAY_VER(dev_priv) != 2)
2397 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2399 intel_encoders_pre_enable(state, crtc);
2401 i9xx_enable_pll(new_crtc_state);
2403 i9xx_pfit_enable(new_crtc_state);
2405 intel_color_load_luts(new_crtc_state);
2406 intel_color_commit_noarm(new_crtc_state);
2407 intel_color_commit_arm(new_crtc_state);
2408 /* update DSPCNTR to configure gamma for pipe bottom color */
2409 intel_disable_primary_plane(new_crtc_state);
/* fall back to a full watermark update when no initial-wm hook exists */
2411 if (!intel_initial_watermarks(state, crtc))
2412 intel_update_watermarks(dev_priv);
2413 intel_enable_transcoder(new_crtc_state);
2415 intel_crtc_vblank_on(new_crtc_state);
2417 intel_encoders_enable(state, crtc);
2419 /* prevents spurious underruns */
2420 if (DISPLAY_VER(dev_priv) == 2)
2421 intel_crtc_wait_for_next_vblank(crtc);
/*
 * i9xx_pfit_disable - turn off the gmch panel fitter. No-op when the old
 * state had no pfit; asserts the transcoder is already disabled before
 * clearing PFIT_CONTROL.
 */
2424 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2426 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2427 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2429 if (!old_crtc_state->gmch_pfit.control)
2432 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2434 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2435 intel_de_read(dev_priv, PFIT_CONTROL));
2436 intel_de_write(dev_priv, PFIT_CONTROL, 0);
/*
 * i9xx_crtc_disable - gmch CRTC disable sequence: encoders off, vblank
 * off, transcoder off, pfit off, then PLL off (platform-specific; DSI
 * keeps its PLL), finishing with underrun reporting and watermarks.
 */
2439 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2440 struct intel_crtc *crtc)
2442 struct intel_crtc_state *old_crtc_state =
2443 intel_atomic_get_old_crtc_state(state, crtc);
2444 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2445 enum pipe pipe = crtc->pipe;
2448 * On gen2 planes are double buffered but the pipe isn't, so we must
2449 * wait for planes to fully turn off before disabling the pipe.
2451 if (DISPLAY_VER(dev_priv) == 2)
2452 intel_crtc_wait_for_next_vblank(crtc);
2454 intel_encoders_disable(state, crtc);
2456 intel_crtc_vblank_off(old_crtc_state);
2458 intel_disable_transcoder(old_crtc_state);
2460 i9xx_pfit_disable(old_crtc_state);
2462 intel_encoders_post_disable(state, crtc);
/* DSI PLLs are managed by the DSI code; only disable non-DSI PLLs */
2464 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2465 if (IS_CHERRYVIEW(dev_priv))
2466 chv_disable_pll(dev_priv, pipe);
2467 else if (IS_VALLEYVIEW(dev_priv))
2468 vlv_disable_pll(dev_priv, pipe);
2470 i9xx_disable_pll(old_crtc_state);
2473 intel_encoders_post_pll_disable(state, crtc);
2475 if (DISPLAY_VER(dev_priv) != 2)
2476 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2478 if (!dev_priv->wm_disp->initial_watermarks)
2479 intel_update_watermarks(dev_priv);
2481 /* clock the pipe down to 640x480@60 to potentially save power */
2482 if (IS_I830(dev_priv))
2483 i830_enable_pipe(dev_priv, pipe);
/*
 * intel_crtc_disable_noatomic - force a CRTC off outside the normal
 * atomic commit path (used during HW state takeover/sanitization):
 * disables visible planes, runs the platform crtc_disable hook through a
 * throwaway atomic state, then scrubs software state (crtc state, global
 * cdclk/dbuf/bw bookkeeping, power-domain references).
 */
2486 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
2487 struct drm_modeset_acquire_ctx *ctx)
2489 struct intel_encoder *encoder;
2490 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2491 struct intel_bw_state *bw_state =
2492 to_intel_bw_state(dev_priv->bw_obj.state);
2493 struct intel_cdclk_state *cdclk_state =
2494 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
2495 struct intel_dbuf_state *dbuf_state =
2496 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
2497 struct intel_crtc_state *crtc_state =
2498 to_intel_crtc_state(crtc->base.state);
2499 struct intel_plane *plane;
2500 struct drm_atomic_state *state;
2501 struct intel_crtc_state *temp_crtc_state;
2502 enum pipe pipe = crtc->pipe;
2505 if (!crtc_state->hw.active)
/* turn off every plane that is currently visible on this CRTC */
2508 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
2509 const struct intel_plane_state *plane_state =
2510 to_intel_plane_state(plane->base.state);
2512 if (plane_state->uapi.visible)
2513 intel_plane_disable_noatomic(crtc, plane);
2516 state = drm_atomic_state_alloc(&dev_priv->drm);
2518 drm_dbg_kms(&dev_priv->drm,
2519 "failed to disable [CRTC:%d:%s], out of memory",
2520 crtc->base.base.id, crtc->base.name);
2524 state->acquire_ctx = ctx;
2526 /* Everything's already locked, -EDEADLK can't happen. */
2527 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
2528 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
2530 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
2532 dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
2534 drm_atomic_state_put(state);
2536 drm_dbg_kms(&dev_priv->drm,
2537 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
2538 crtc->base.base.id, crtc->base.name);
2540 crtc->active = false;
2541 crtc->base.enabled = false;
2543 drm_WARN_ON(&dev_priv->drm,
2544 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
2545 crtc_state->uapi.active = false;
2546 crtc_state->uapi.connector_mask = 0;
2547 crtc_state->uapi.encoder_mask = 0;
2548 intel_crtc_free_hw_state(crtc_state);
2549 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
/* detach any encoder still pointing at this CRTC */
2551 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
2552 encoder->base.crtc = NULL;
2554 intel_fbc_disable(crtc);
2555 intel_update_watermarks(dev_priv);
2556 intel_disable_shared_dpll(crtc_state);
2558 intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
/* clear this pipe's contribution to the global cdclk/dbuf/bw state */
2560 cdclk_state->min_cdclk[pipe] = 0;
2561 cdclk_state->min_voltage_level[pipe] = 0;
2562 cdclk_state->active_pipes &= ~BIT(pipe);
2564 dbuf_state->active_pipes &= ~BIT(pipe);
2566 bw_state->data_rate[pipe] = 0;
2567 bw_state->num_active_planes[pipe] = 0;
2571 * turn all crtc's off, but do not adjust state
2572 * This has to be paired with a call to intel_modeset_setup_hw_state.
/*
 * intel_display_suspend - turn all CRTCs off via the atomic suspend
 * helper and stash the saved state in modeset_restore_state for resume.
 * Returns 0 (or the helper's error code) — the return falls in an
 * extraction gap in this snapshot.
 */
2574 int intel_display_suspend(struct drm_device *dev)
2576 struct drm_i915_private *dev_priv = to_i915(dev);
2577 struct drm_atomic_state *state;
2580 if (!HAS_DISPLAY(dev_priv))
2583 state = drm_atomic_helper_suspend(dev);
2584 ret = PTR_ERR_OR_ZERO(state);
2586 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2589 dev_priv->modeset_restore_state = state;
/*
 * intel_encoder_destroy - common encoder destroy hook: clean up the DRM
 * encoder and free the containing intel_encoder.
 */
2593 void intel_encoder_destroy(struct drm_encoder *encoder)
2595 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2597 drm_encoder_cleanup(encoder);
2598 kfree(intel_encoder);
2601 /* Cross check the actual hw state with our own modeset state tracking (and it's
2602 * internal consistency). */
/*
 * intel_connector_verify_state - cross-check a connector's software state
 * against what the hardware reports (connector enabled vs. attached CRTC
 * active, best_encoder consistency). Emits I915_STATE_WARNs on mismatch.
 */
2603 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
2604 struct drm_connector_state *conn_state)
2606 struct intel_connector *connector = to_intel_connector(conn_state->connector);
2607 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2609 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
2610 connector->base.base.id, connector->base.name);
2612 if (connector->get_hw_state(connector)) {
2613 struct intel_encoder *encoder = intel_attached_encoder(connector);
2615 I915_STATE_WARN(!crtc_state,
2616 "connector enabled without attached crtc\n");
2621 I915_STATE_WARN(!crtc_state->hw.active,
2622 "connector is active, but attached crtc isn't\n");
/* MST connectors share an encoder; skip the 1:1 encoder checks */
2624 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
2627 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
2628 "atomic encoder doesn't match attached encoder\n");
2630 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
2631 "attached encoder crtc differs from connector crtc\n");
2633 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
2634 "attached crtc is active, but connector isn't\n");
2635 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
2636 "best encoder set without crtc!\n");
/*
 * intel_crtc_supports_double_wide - double-wide pipe support: display
 * version < 4 only, and restricted to pipe A except on i915G.
 */
2640 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2642 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2644 /* GDG double wide on either pipe, otherwise pipe A only */
2645 return DISPLAY_VER(dev_priv) < 4 &&
2646 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
/*
 * ilk_pipe_pixel_rate - pipe pixel rate for ILK+: the pipe clock,
 * adjusted for PCH panel-fitter downscaling when the pfit is enabled
 * (scaling source rect vs. pfit destination). 16.16 fixed-point src.
 */
2649 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2651 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2652 struct drm_rect src;
2655 * We only use IF-ID interlacing. If we ever use
2656 * PF-ID we'll need to adjust the pixel_rate here.
2659 if (!crtc_state->pch_pfit.enabled)
2662 drm_rect_init(&src, 0, 0,
2663 drm_rect_width(&crtc_state->pipe_src) << 16,
2664 drm_rect_height(&crtc_state->pipe_src) << 16);
2666 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
/*
 * intel_mode_from_crtc_timings - copy the crtc_* (hardware) timing fields
 * of @timings into the normal timing fields of @mode, propagate flags and
 * clock, mark the mode driver-generated and (re)name it.
 */
2670 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2671 const struct drm_display_mode *timings)
2673 mode->hdisplay = timings->crtc_hdisplay;
2674 mode->htotal = timings->crtc_htotal;
2675 mode->hsync_start = timings->crtc_hsync_start;
2676 mode->hsync_end = timings->crtc_hsync_end;
2678 mode->vdisplay = timings->crtc_vdisplay;
2679 mode->vtotal = timings->crtc_vtotal;
2680 mode->vsync_start = timings->crtc_vsync_start;
2681 mode->vsync_end = timings->crtc_vsync_end;
2683 mode->flags = timings->flags;
2684 mode->type = DRM_MODE_TYPE_DRIVER;
2686 mode->clock = timings->crtc_clock;
2688 drm_mode_set_name(mode);
/*
 * intel_crtc_compute_pixel_rate - fill crtc_state->pixel_rate: raw pipe
 * clock on GMCH platforms (pfit not accounted for yet, see FIXME),
 * pfit-adjusted rate via ilk_pipe_pixel_rate() otherwise.
 */
2691 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2693 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2695 if (HAS_GMCH(dev_priv))
2696 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2697 crtc_state->pixel_rate =
2698 crtc_state->hw.pipe_mode.crtc_clock;
2700 crtc_state->pixel_rate =
2701 ilk_pipe_pixel_rate(crtc_state);
/*
 * intel_bigjoiner_adjust_timings - derive per-pipe timings when multiple
 * pipes drive one output (bigjoiner): divide the horizontal crtc_*
 * fields and the clock by the number of joined pipes.
 * The early return for the non-joined case falls in an extraction gap.
 */
2704 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2705 struct drm_display_mode *mode)
2707 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2712 mode->crtc_clock /= num_pipes;
2713 mode->crtc_hdisplay /= num_pipes;
2714 mode->crtc_hblank_start /= num_pipes;
2715 mode->crtc_hblank_end /= num_pipes;
2716 mode->crtc_hsync_start /= num_pipes;
2717 mode->crtc_hsync_end /= num_pipes;
2718 mode->crtc_htotal /= num_pipes;
/*
 * intel_splitter_adjust_timings - expand eDP MSO per-segment transcoder
 * timings to the full mode: h_full = (h_segment - pixel_overlap) *
 * link_count, applied to all horizontal fields and the clock.
 */
2721 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2722 struct drm_display_mode *mode)
2724 int overlap = crtc_state->splitter.pixel_overlap;
2725 int n = crtc_state->splitter.link_count;
2727 if (!crtc_state->splitter.enable)
2731 * eDP MSO uses segment timings from EDID for transcoder
2732 * timings, but full mode for everything else.
2734 * h_full = (h_segment - pixel_overlap) * link_count
2736 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2737 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2738 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2739 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2740 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2741 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2742 mode->crtc_clock *= n;
/*
 * intel_crtc_readout_derived_state - after HW readout, derive pipe_mode,
 * the user-visible mode and the pixel rate from the raw transcoder
 * timings in adjusted_mode (expanding MSO segments, then splitting
 * bigjoiner per-pipe timings).
 */
2745 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2747 struct drm_display_mode *mode = &crtc_state->hw.mode;
2748 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2749 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2752 * Start with the adjusted_mode crtc timings, which
2753 * have been filled with the transcoder timings.
2755 drm_mode_copy(pipe_mode, adjusted_mode);
2757 /* Expand MSO per-segment transcoder timings to full */
2758 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2761 * We want the full numbers in adjusted_mode normal timings,
2762 * adjusted_mode crtc timings are left with the raw transcoder
2765 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2767 /* Populate the "user" mode with full numbers */
2768 drm_mode_copy(mode, pipe_mode);
2769 intel_mode_from_crtc_timings(mode, mode);
/* hdisplay covers all joined pipes; ?: 1 handles the non-joined case */
2770 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
2771 (intel_bigjoiner_num_pipes(crtc_state) ?: 1);
2772 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
2774 /* Derive per-pipe timings in case bigjoiner is used */
2775 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2776 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2778 intel_crtc_compute_pixel_rate(crtc_state);
/*
 * intel_encoder_get_config - run the encoder's get_config hook, then
 * derive pipe_mode/user mode/pixel rate from the read-out state.
 */
2781 static void intel_encoder_get_config(struct intel_encoder *encoder,
2782 struct intel_crtc_state *crtc_state)
2784 encoder->get_config(encoder, crtc_state);
2786 intel_crtc_readout_derived_state(crtc_state);
/*
 * intel_bigjoiner_compute_pipe_src - shrink pipe_src horizontally to this
 * pipe's share (width / num_pipes) when bigjoiner is in use. The early
 * return for the non-joined case falls in an extraction gap.
 */
2789 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2791 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2797 width = drm_rect_width(&crtc_state->pipe_src);
2798 height = drm_rect_height(&crtc_state->pipe_src);
2800 drm_rect_init(&crtc_state->pipe_src, 0, 0,
2801 width / num_pipes, height);
/*
 * intel_crtc_compute_pipe_src - compute and validate the pipe source
 * rect: splits for bigjoiner, then rejects odd widths in double-wide and
 * dual-link-LVDS configurations (HW requires even width there).
 * The error/success return lines fall in extraction gaps.
 */
2804 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2806 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2807 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2809 intel_bigjoiner_compute_pipe_src(crtc_state);
2812 * Pipe horizontal size must be even in:
2814 * - LVDS dual channel mode
2815 * - Double wide pipe
2817 if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2818 if (crtc_state->double_wide) {
2819 drm_dbg_kms(&i915->drm,
2820 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2821 crtc->base.base.id, crtc->base.name);
2825 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2826 intel_is_dual_link_lvds(i915)) {
2827 drm_dbg_kms(&i915->drm,
2828 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2829 crtc->base.base.id, crtc->base.name);
/*
 * intel_crtc_compute_pipe_mode - derive pipe_mode from adjusted_mode
 * (MSO expansion, bigjoiner split) and validate the pipe clock against
 * the platform dotclock limit, enabling double-wide mode on old
 * platforms when the clock exceeds 90% of cdclk.
 * The error/success return lines fall in extraction gaps.
 */
2837 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2839 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2840 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2841 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2842 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2843 int clock_limit = i915->max_dotclk_freq;
2846 * Start with the adjusted_mode crtc timings, which
2847 * have been filled with the transcoder timings.
2849 drm_mode_copy(pipe_mode, adjusted_mode);
2851 /* Expand MSO per-segment transcoder timings to full */
2852 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2854 /* Derive per-pipe timings in case bigjoiner is used */
2855 intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
2856 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2858 if (DISPLAY_VER(i915) < 4) {
2859 clock_limit = i915->max_cdclk_freq * 9 / 10;
2862 * Enable double wide mode when the dot clock
2863 * is > 90% of the (display) core speed.
2865 if (intel_crtc_supports_double_wide(crtc) &&
2866 pipe_mode->crtc_clock > clock_limit) {
2867 clock_limit = i915->max_dotclk_freq;
2868 crtc_state->double_wide = true;
2872 if (pipe_mode->crtc_clock > clock_limit) {
2873 drm_dbg_kms(&i915->drm,
2874 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2875 crtc->base.base.id, crtc->base.name,
2876 pipe_mode->crtc_clock, clock_limit,
2877 str_yes_no(crtc_state->double_wide));
/*
 * intel_crtc_compute_config - top-level CRTC config computation: pipe
 * source rect, pipe mode, pixel rate, then FDI config for PCH encoders.
 * Returns 0 or a negative error from the sub-steps.
 */
2884 static int intel_crtc_compute_config(struct intel_crtc *crtc,
2885 struct intel_crtc_state *crtc_state)
2889 ret = intel_crtc_compute_pipe_src(crtc_state);
2893 ret = intel_crtc_compute_pipe_mode(crtc_state);
2897 intel_crtc_compute_pixel_rate(crtc_state);
2899 if (crtc_state->has_pch_encoder)
2900 return ilk_fdi_compute_config(crtc, crtc_state);
/*
 * intel_reduce_m_n_ratio - scale *num/*den down until both fit within
 * DATA_LINK_M_N_MASK. The loop body (the actual halving) falls in an
 * extraction gap in this snapshot — confirm against the full source.
 */
2906 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2908 while (*num > DATA_LINK_M_N_MASK ||
2909 *den > DATA_LINK_M_N_MASK) {
/*
 * compute_m_n - compute a link M/N pair for ratio m/n: pick N (fixed
 * DP_LINK_CONSTANT_N_VALUE in constant-N mode — the conditional guarding
 * that branch falls in an extraction gap — otherwise a power of two
 * capped at DATA_LINK_N_MAX), derive M, then reduce both into range.
 */
2915 static void compute_m_n(unsigned int m, unsigned int n,
2916 u32 *ret_m, u32 *ret_n,
2920 * Several DP dongles in particular seem to be fussy about
2921 * too large link M/N values. Give N value as 0x8000 that
2922 * should be acceptable by specific devices. 0x8000 is the
2923 * specified fixed N value for asynchronous clock mode,
2924 * which the devices expect also in synchronous clock mode.
2927 *ret_n = DP_LINK_CONSTANT_N_VALUE;
2929 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2931 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2932 intel_reduce_m_n_ratio(ret_m, ret_n);
/*
 * intel_link_compute_m_n - fill @m_n with data and link M/N values for
 * the given bpp/lane-count/clocks; the data clock is FEC-adjusted when
 * fec_enable is set (the conditional line falls in an extraction gap).
 */
2936 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
2937 int pixel_clock, int link_clock,
2938 struct intel_link_m_n *m_n,
2939 bool constant_n, bool fec_enable)
2941 u32 data_clock = bits_per_pixel * pixel_clock;
2944 data_clock = intel_dp_mode_to_fec_clock(data_clock);
/* data M/N: ratio of stream data rate to total link bandwidth */
2947 compute_m_n(data_clock,
2948 link_clock * nlanes * 8,
2949 &m_n->data_m, &m_n->data_n,
/* link M/N: ratio of pixel clock to link symbol clock */
2952 compute_m_n(pixel_clock, link_clock,
2953 &m_n->link_m, &m_n->link_n,
/*
 * intel_panel_sanitize_ssc - on IBX/CPT PCH, override the VBT's LVDS SSC
 * setting with what the BIOS actually programmed, to avoid flicker from
 * toggling SSC. The register read feeding bios_lvds_use_ssc is partly in
 * an extraction gap in this snapshot.
 */
2957 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2960 * There may be no VBT; and if the BIOS enabled SSC we can
2961 * just keep using it to avoid unnecessary flicker. Whereas if the
2962 * BIOS isn't using it, don't assume it will work even if the VBT
2963 * indicates as much.
2965 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2966 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2970 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2971 drm_dbg_kms(&dev_priv->drm,
2972 "SSC %s by BIOS, overriding VBT which says %s\n",
2973 str_enabled_disabled(bios_lvds_use_ssc),
2974 str_enabled_disabled(dev_priv->vbt.lvds_use_ssc));
2975 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
/*
 * intel_zero_m_n - zero an M/N struct; the all-zero pattern matches the
 * register value for "no M/N programmed".
 */
2980 void intel_zero_m_n(struct intel_link_m_n *m_n)
2982 /* corresponds to 0 register value */
2983 memset(m_n, 0, sizeof(*m_n));
/*
 * intel_set_m_n - program one set of data/link M/N registers. LINK_N is
 * written last because on BDW+ it arms the double-buffered update of the
 * whole register group.
 */
2987 void intel_set_m_n(struct drm_i915_private *i915,
2988 const struct intel_link_m_n *m_n,
2989 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
2990 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
2992 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
2993 intel_de_write(i915, data_n_reg, m_n->data_n);
2994 intel_de_write(i915, link_m_reg, m_n->link_m);
2996 * On BDW+ writing LINK_N arms the double buffered update
2997 * of all the M/N registers, so it must be written last.
2999 intel_de_write(i915, link_n_reg, m_n->link_n);
/*
 * intel_cpu_transcoder_has_m2_n2 - does this transcoder have a second
 * M/N register set? HSW: eDP transcoder only; otherwise display 5-7 and
 * CHV.
 */
3002 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
3003 enum transcoder transcoder)
3005 if (IS_HASWELL(dev_priv))
3006 return transcoder == TRANSCODER_EDP;
3008 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
/*
 * intel_cpu_transcoder_set_m1_n1 - program the primary M/N set: per-
 * transcoder registers on display 5+, per-pipe G4X registers otherwise.
 */
3011 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
3012 enum transcoder transcoder,
3013 const struct intel_link_m_n *m_n)
3015 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3016 enum pipe pipe = crtc->pipe;
3018 if (DISPLAY_VER(dev_priv) >= 5)
3019 intel_set_m_n(dev_priv, m_n,
3020 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3021 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3023 intel_set_m_n(dev_priv, m_n,
3024 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3025 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
/*
 * intel_cpu_transcoder_set_m2_n2 - program the secondary M/N set; no-op
 * on transcoders that lack M2/N2 registers.
 */
3028 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
3029 enum transcoder transcoder,
3030 const struct intel_link_m_n *m_n)
3032 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3034 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3037 intel_set_m_n(dev_priv, m_n,
3038 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3039 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
/*
 * intel_set_transcoder_timings - program the transcoder H/V timing
 * registers from adjusted_mode. Interlace needs local vtotal/vblank_end
 * tweaks and a VSYNCSHIFT value; a HSW workaround copies VTOTAL_EDP into
 * VTOTAL_B/C when the EDP transcoder drives pipe B/C.
 */
3042 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
3044 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3045 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3046 enum pipe pipe = crtc->pipe;
3047 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3048 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
3049 u32 crtc_vtotal, crtc_vblank_end;
3052 /* We need to be careful not to changed the adjusted mode, for otherwise
3053 * the hw state checker will get angry at the mismatch. */
3054 crtc_vtotal = adjusted_mode->crtc_vtotal;
3055 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
3057 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3058 /* the chip adds 2 halflines automatically */
3060 crtc_vblank_end -= 1;
3062 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3063 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
3065 vsyncshift = adjusted_mode->crtc_hsync_start -
3066 adjusted_mode->crtc_htotal / 2;
/* NOTE(review): the condition guarding this adjustment is in a gap */
3068 vsyncshift += adjusted_mode->crtc_htotal;
3071 if (DISPLAY_VER(dev_priv) > 3)
3072 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
3075 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
3076 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
3077 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
3078 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
3079 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
3080 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
3082 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
3083 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
3084 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
3085 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
3086 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
3087 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
3089 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
3090 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
3091 * documented on the DDI_FUNC_CTL register description, EDP Input Select
3093 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
3094 (pipe == PIPE_B || pipe == PIPE_C))
3095 intel_de_write(dev_priv, VTOTAL(pipe),
3096 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
/*
 * intel_set_pipe_src_size - program PIPESRC with the pipe source
 * dimensions (registers hold size minus one).
 */
3100 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
3102 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3103 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3104 int width = drm_rect_width(&crtc_state->pipe_src);
3105 int height = drm_rect_height(&crtc_state->pipe_src);
3106 enum pipe pipe = crtc->pipe;
3108 /* pipesrc controls the size that is scaled from, which should
3109 * always be the user's requested size.
3111 intel_de_write(dev_priv, PIPESRC(pipe),
3112 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
/*
 * intel_pipe_is_interlaced - read PIPECONF and report whether the pipe
 * is running interlaced. gen2 has no interlace support (early return in
 * an extraction gap); HSW/BDW/display-9+ use the HSW mask layout.
 */
3115 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3117 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3118 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3120 if (DISPLAY_VER(dev_priv) == 2)
3123 if (DISPLAY_VER(dev_priv) >= 9 ||
3124 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3125 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3127 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
/*
 * intel_get_transcoder_timings - read the transcoder timing registers
 * back into adjusted_mode crtc_* fields (each register packs start/end
 * as low/high 16-bit fields, stored minus one). Blank/sync registers are
 * skipped for DSI transcoders; interlace re-adds the halfline offsets.
 */
3130 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
3131 struct intel_crtc_state *pipe_config)
3133 struct drm_device *dev = crtc->base.dev;
3134 struct drm_i915_private *dev_priv = to_i915(dev);
3135 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
3138 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
3139 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
3140 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
3142 if (!transcoder_is_dsi(cpu_transcoder)) {
3143 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
3144 pipe_config->hw.adjusted_mode.crtc_hblank_start =
3146 pipe_config->hw.adjusted_mode.crtc_hblank_end =
3147 ((tmp >> 16) & 0xffff) + 1;
3149 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
3150 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
3151 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
3153 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
3154 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
3155 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
3157 if (!transcoder_is_dsi(cpu_transcoder)) {
3158 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
3159 pipe_config->hw.adjusted_mode.crtc_vblank_start =
3161 pipe_config->hw.adjusted_mode.crtc_vblank_end =
3162 ((tmp >> 16) & 0xffff) + 1;
3164 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
3165 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
3166 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
/* undo the halfline adjustment applied when the timings were written */
3168 if (intel_pipe_is_interlaced(pipe_config)) {
3169 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
3170 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
3171 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
/*
 * intel_bigjoiner_adjust_pipe_src - offset this pipe's source rect by
 * its position relative to the bigjoiner master pipe (pipe index delta
 * times the per-pipe width).
 */
3175 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
3177 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3178 int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
3179 enum pipe master_pipe, pipe = crtc->pipe;
3185 master_pipe = bigjoiner_master_pipe(crtc_state);
3186 width = drm_rect_width(&crtc_state->pipe_src);
3188 drm_rect_translate_to(&crtc_state->pipe_src,
3189 (pipe - master_pipe) * width, 0);
/*
 * intel_get_pipe_src_size - read PIPESRC back into pipe_src (register
 * fields are stored minus one), then apply the bigjoiner offset.
 */
3192 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3193 struct intel_crtc_state *pipe_config)
3195 struct drm_device *dev = crtc->base.dev;
3196 struct drm_i915_private *dev_priv = to_i915(dev);
3199 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3201 drm_rect_init(&pipe_config->pipe_src, 0, 0,
3202 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
3203 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
3205 intel_bigjoiner_adjust_pipe_src(pipe_config);
/*
 * i9xx_set_pipeconf - build and write PIPECONF for gmch platforms:
 * enable bit (I830 keeps both pipes on), double-wide, dither/bpc (G4X+
 * only; the bpc switch's case labels are missing from this snapshot),
 * interlace mode, limited color range, gamma mode, framestart delay.
 */
3208 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3210 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3211 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3214 /* we keep both pipes enabled on 830 */
3215 if (IS_I830(dev_priv))
3216 pipeconf |= PIPECONF_ENABLE;
3218 if (crtc_state->double_wide)
3219 pipeconf |= PIPECONF_DOUBLE_WIDE;
3221 /* only g4x and later have fancy bpc/dither controls */
3222 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3223 IS_CHERRYVIEW(dev_priv)) {
3224 /* Bspec claims that we can't use dithering for 30bpp pipes. */
3225 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3226 pipeconf |= PIPECONF_DITHER_EN |
3227 PIPECONF_DITHER_TYPE_SP;
3229 switch (crtc_state->pipe_bpp) {
3231 pipeconf |= PIPECONF_BPC_6;
3234 pipeconf |= PIPECONF_BPC_8;
3237 pipeconf |= PIPECONF_BPC_10;
3240 /* Case prevented by intel_choose_pipe_bpp_dither. */
3245 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3246 if (DISPLAY_VER(dev_priv) < 4 ||
3247 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3248 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3250 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3252 pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
3255 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3256 crtc_state->limited_color_range)
3257 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3259 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3261 pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3263 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
3264 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
/*
 * Whether this GMCH platform has a panel fitter: gen4+, Pineview and
 * mobile parts do; 830 is explicitly excluded (early-return line elided).
 */
3267 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3269 if (IS_I830(dev_priv))
3272 return DISPLAY_VER(dev_priv) >= 4 ||
3273 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
/*
 * Read back the GMCH panel fitter state (PFIT_CONTROL/PFIT_PGM_RATIOS)
 * into crtc_state->gmch_pfit, but only if the fitter is enabled and
 * actually attached to this pipe (pre-gen4 hardwires it to pipe B).
 */
3276 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3278 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3279 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3282 if (!i9xx_has_pfit(dev_priv))
3285 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3286 if (!(tmp & PFIT_ENABLE))
3289 /* Check whether the pfit is attached to our pipe. */
3290 if (DISPLAY_VER(dev_priv) < 4) {
3291 if (crtc->pipe != PIPE_B)
3294 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3298 crtc_state->gmch_pfit.control = tmp;
3299 crtc_state->gmch_pfit.pgm_ratios =
3300 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
/*
 * Read the VLV DPLL dividers from the DPIO sideband (VLV_PLL_DW3) and
 * compute port_clock. Skipped when the DPLL is off (DSI uses its own PLL).
 * Reference clock is fixed at 100 MHz on this platform.
 */
3303 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3304 struct intel_crtc_state *pipe_config)
3306 struct drm_device *dev = crtc->base.dev;
3307 struct drm_i915_private *dev_priv = to_i915(dev);
3308 enum pipe pipe = crtc->pipe;
3311 int refclk = 100000;
3313 /* In case of DSI, DPLL will not be used */
3314 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3317 vlv_dpio_get(dev_priv);
3318 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3319 vlv_dpio_put(dev_priv);
/* Unpack m1/m2/n/p1/p2 divider fields from the single mdiv register. */
3321 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3322 clock.m2 = mdiv & DPIO_M2DIV_MASK;
3323 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3324 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3325 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3327 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
/*
 * CHV variant of the clock readout: dividers are spread across several
 * DPIO registers (CMN_DW13 and PLL_DW0..DW3) per channel. m2 carries a
 * 22-bit fractional part when DPIO_CHV_FRAC_DIV_EN is set.
 */
3330 static void chv_crtc_clock_get(struct intel_crtc *crtc,
3331 struct intel_crtc_state *pipe_config)
3333 struct drm_device *dev = crtc->base.dev;
3334 struct drm_i915_private *dev_priv = to_i915(dev);
3335 enum pipe pipe = crtc->pipe;
3336 enum dpio_channel port = vlv_pipe_to_channel(pipe);
3338 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
3339 int refclk = 100000;
3341 /* In case of DSI, DPLL will not be used */
3342 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3345 vlv_dpio_get(dev_priv);
3346 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
3347 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
3348 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
3349 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
3350 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
3351 vlv_dpio_put(dev_priv);
/* m2 integer part sits in the high bits; fraction ORed in below. */
3353 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
3354 clock.m2 = (pll_dw0 & 0xff) << 22;
3355 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
3356 clock.m2 |= pll_dw2 & 0x3fffff;
3357 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
3358 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
3359 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
3361 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
/*
 * Derive the pipe's output color format (RGB / YCbCr 4:4:4 / 4:2:0)
 * from the PIPEMISC register. Warns if 4:2:0 is enabled without
 * full-blend mode, which is the only supported combination.
 */
3364 static enum intel_output_format
3365 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3367 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3370 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3372 if (tmp & PIPEMISC_YUV420_ENABLE) {
3373 /* We support 4:2:0 in full blend mode only */
3374 drm_WARN_ON(&dev_priv->drm,
3375 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3377 return INTEL_OUTPUT_FORMAT_YCBCR420;
3378 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3379 return INTEL_OUTPUT_FORMAT_YCBCR444;
3381 return INTEL_OUTPUT_FORMAT_RGB;
/*
 * Read back pipe gamma/CSC enable state from the primary plane's
 * DSPCNTR register (legacy platforms route these through the plane).
 * CSC enable only exists on non-GMCH hardware.
 */
3385 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3387 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3388 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3389 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3390 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3393 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3395 if (tmp & DISP_PIPE_GAMMA_ENABLE)
3396 crtc_state->gamma_enable = true;
3398 if (!HAS_GMCH(dev_priv) &&
3399 tmp & DISP_PIPE_CSC_ENABLE)
3400 crtc_state->csc_enable = true;
/*
 * Full hardware state readout for gen2-4/VLV/CHV pipes. Takes the pipe
 * power domain (if enabled), then reads PIPECONF, bpc, color range,
 * gamma, timings, pfit, pixel multiplier and DPLL state into
 * pipe_config. Returns whether the pipe is active.
 * NOTE(review): early-bail / label / return lines are elided in this
 * chunk; the power reference is released at the end regardless.
 */
3403 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3404 struct intel_crtc_state *pipe_config)
3406 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3407 enum intel_display_power_domain power_domain;
3408 intel_wakeref_t wakeref;
3412 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3413 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3417 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* On these platforms the cpu transcoder maps 1:1 to the pipe. */
3418 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3419 pipe_config->shared_dpll = NULL;
3423 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3424 if (!(tmp & PIPECONF_ENABLE))
3427 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3428 IS_CHERRYVIEW(dev_priv)) {
3429 switch (tmp & PIPECONF_BPC_MASK) {
3430 case PIPECONF_BPC_6:
3431 pipe_config->pipe_bpp = 18;
3433 case PIPECONF_BPC_8:
3434 pipe_config->pipe_bpp = 24;
3436 case PIPECONF_BPC_10:
3437 pipe_config->pipe_bpp = 30;
3445 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3446 (tmp & PIPECONF_COLOR_RANGE_SELECT))
3447 pipe_config->limited_color_range = true;
3449 pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
3451 pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
3453 if (IS_CHERRYVIEW(dev_priv))
3454 pipe_config->cgm_mode = intel_de_read(dev_priv,
3455 CGM_PIPE_MODE(crtc->pipe));
3457 i9xx_get_pipe_color_config(pipe_config);
3458 intel_color_get_config(pipe_config);
3460 if (DISPLAY_VER(dev_priv) < 4)
3461 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
3463 intel_get_transcoder_timings(crtc, pipe_config);
3464 intel_get_pipe_src_size(crtc, pipe_config);
3466 i9xx_get_pfit_config(pipe_config);
3468 if (DISPLAY_VER(dev_priv) >= 4) {
3469 /* No way to read it out on pipes B and C */
3470 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
3471 tmp = dev_priv->chv_dpll_md[crtc->pipe];
3473 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
3474 pipe_config->pixel_multiplier =
3475 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3476 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3477 pipe_config->dpll_hw_state.dpll_md = tmp;
3478 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3479 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3480 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
3481 pipe_config->pixel_multiplier =
3482 ((tmp & SDVO_MULTIPLIER_MASK)
3483 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3485 /* Note that on i915G/GM the pixel multiplier is in the sdvo
3486 * port and will be fixed up in the encoder->get_config
3488 pipe_config->pixel_multiplier = 1;
3490 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
3492 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
3493 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
3495 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
3498 /* Mask out read-only status bits. */
3499 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
3500 DPLL_PORTC_READY_MASK |
3501 DPLL_PORTB_READY_MASK);
3504 if (IS_CHERRYVIEW(dev_priv))
3505 chv_crtc_clock_get(crtc, pipe_config);
3506 else if (IS_VALLEYVIEW(dev_priv))
3507 vlv_crtc_clock_get(crtc, pipe_config);
3509 i9xx_crtc_clock_get(crtc, pipe_config);
/*
3512 * Normally the dotclock is filled in by the encoder .get_config()
3513 * but in case the pipe is enabled w/o any ports we need a sane
 * default here.
 */
3516 pipe_config->hw.adjusted_mode.crtc_clock =
3517 pipe_config->port_clock / pipe_config->pixel_multiplier;
3522 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Program PIPECONF for ILK-class (PCH) platforms: bpc, dithering,
 * interlace mode, limited color range (not valid together with YCbCr
 * output, hence the WARN), output colorspace, gamma mode, frame start
 * delay and MSA timing delay. NOTE(review): case labels/breaks elided.
 */
3527 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3529 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3530 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3531 enum pipe pipe = crtc->pipe;
3536 switch (crtc_state->pipe_bpp) {
3538 val |= PIPECONF_BPC_6;
3541 val |= PIPECONF_BPC_8;
3544 val |= PIPECONF_BPC_10;
3547 val |= PIPECONF_BPC_12;
3550 /* Case prevented by intel_choose_pipe_bpp_dither. */
3554 if (crtc_state->dither)
3555 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
3557 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3558 val |= PIPECONF_INTERLACE_IF_ID_ILK;
3560 val |= PIPECONF_INTERLACE_PF_PD_ILK;
/*
 * Limited range together with non-RGB output is invalid:
3563 * This would end up with an odd purple hue over
3564 * the entire display. Make sure we don't do it.
 */
3566 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3567 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3569 if (crtc_state->limited_color_range &&
3570 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3571 val |= PIPECONF_COLOR_RANGE_SELECT;
3573 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3574 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
3576 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3578 val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3579 val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
3581 intel_de_write(dev_priv, PIPECONF(pipe), val);
3582 intel_de_posting_read(dev_priv, PIPECONF(pipe));
/*
 * Program the transcoder PIPECONF for HSW+ hardware. Only dithering
 * (HSW only), interlace mode and the HSW YUV colorspace bit live here;
 * bpc etc. moved to other registers on these platforms.
 */
3585 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3587 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3588 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3589 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3592 if (IS_HASWELL(dev_priv) && crtc_state->dither)
3593 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
3595 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3596 val |= PIPECONF_INTERLACE_IF_ID_ILK;
3598 val |= PIPECONF_INTERLACE_PF_PD_ILK;
3600 if (IS_HASWELL(dev_priv) &&
3601 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3602 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3604 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3605 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
/*
 * Program PIPEMISC for BDW+: bpc (12bpc encoding only exists on ADL-P+),
 * dithering, YUV output/4:2:0 full-blend, HDR precision (gen11+) and
 * pixel rounding (gen12+). NOTE(review): case labels elided in chunk.
 */
3608 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
3610 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3611 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3614 switch (crtc_state->pipe_bpp) {
3616 val |= PIPEMISC_BPC_6;
3619 val |= PIPEMISC_BPC_8;
3622 val |= PIPEMISC_BPC_10;
3625 /* Port output 12BPC defined for ADLP+ */
3626 if (DISPLAY_VER(dev_priv) > 12)
3627 val |= PIPEMISC_BPC_12_ADLP;
3630 MISSING_CASE(crtc_state->pipe_bpp);
3634 if (crtc_state->dither)
3635 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
3637 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3638 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3639 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
3641 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3642 val |= PIPEMISC_YUV420_ENABLE |
3643 PIPEMISC_YUV420_MODE_FULL_BLEND;
3645 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3646 val |= PIPEMISC_HDR_MODE_PRECISION;
3648 if (DISPLAY_VER(dev_priv) >= 12)
3649 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
3651 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
/*
 * Read back pipe bpp from the PIPEMISC bpc field. The 12bpc encoding is
 * only defined on ADL-P+; on older platforms that bit pattern is not a
 * valid readout (see workaround note below).
 * NOTE(review): the return statements for each case are elided here.
 */
3654 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
3656 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3659 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3661 switch (tmp & PIPEMISC_BPC_MASK) {
3662 case PIPEMISC_BPC_6:
3664 case PIPEMISC_BPC_8:
3666 case PIPEMISC_BPC_10:
/*
3669 * PORT OUTPUT 12 BPC defined for ADLP+.
 *
3672 * For previous platforms with DSI interface, bits 5:7
3673 * are used for storing pipe_bpp irrespective of dithering.
3674 * Since the value of 12 BPC is not defined for these bits
3675 * on older platforms, need to find a workaround for 12 BPC
3676 * MIPI DSI HW readout.
 */
3678 case PIPEMISC_BPC_12_ADLP:
3679 if (DISPLAY_VER(dev_priv) > 12)
/*
 * Compute the minimum number of FDI lanes needed for the given dotclock,
 * link bandwidth and pipe bpp, padding the payload by 5% for spread
 * spectrum headroom (each lane carries link_bw * 8 bits).
 */
3688 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
/*
3691 * Account for spread spectrum to avoid
3692 * oversubscribing the link. Max center spread
3693 * is 2.5%; use 5% for safety's sake.
 */
3695 u32 bps = target_clock * bpp * 21 / 20;
3696 return DIV_ROUND_UP(bps, link_bw * 8);
/*
 * Read a full data/link M/N divider set from the four given registers
 * into *m_n. TU size is stored in the data M register as (tu - 1).
 */
3699 void intel_get_m_n(struct drm_i915_private *i915,
3700 struct intel_link_m_n *m_n,
3701 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3702 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3704 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3705 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3706 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3707 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3708 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
/*
 * Read the primary (M1/N1) link dividers for a CPU transcoder. Gen5+
 * uses per-transcoder registers; older g4x-style hardware uses
 * per-pipe registers instead.
 */
3711 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3712 enum transcoder transcoder,
3713 struct intel_link_m_n *m_n)
3715 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3716 enum pipe pipe = crtc->pipe;
3718 if (DISPLAY_VER(dev_priv) >= 5)
3719 intel_get_m_n(dev_priv, m_n,
3720 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3721 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3723 intel_get_m_n(dev_priv, m_n,
3724 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3725 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
/*
 * Read the alternate (M2/N2) link dividers, used e.g. for DRRS.
 * No-op on transcoders that lack the M2/N2 register set.
 */
3728 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3729 enum transcoder transcoder,
3730 struct intel_link_m_n *m_n)
3732 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3734 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3737 intel_get_m_n(dev_priv, m_n,
3738 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3739 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
/*
 * Decode a pfit window position/size register pair (x/y packed as
 * hi16/lo16) into the pch_pfit.dst rect.
 */
3742 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3745 drm_rect_init(&crtc_state->pch_pfit.dst,
3746 pos >> 16, pos & 0xffff,
3747 size >> 16, size & 0xffff);
/*
 * SKL+ pfit readout: scan this pipe's scalers for one that is enabled
 * and bound to the pipe (not a plane), record its window as the pfit
 * destination and mark the scaler in use. The final scaler_users
 * update records whether a pipe scaler was found at all.
 * NOTE(review): loop-internal continue/break lines are elided here.
 */
3750 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
3752 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3754 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
3758 /* find scaler attached to this pipe */
3759 for (i = 0; i < crtc->num_scalers; i++) {
3762 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
3763 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
3767 crtc_state->pch_pfit.enabled = true;
3769 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
3770 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
3772 ilk_get_pfit_pos_size(crtc_state, pos, size);
3774 scaler_state->scalers[i].in_use = true;
3778 scaler_state->scaler_id = id;
3780 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
3782 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
/*
 * ILK-class pfit readout from PF_CTL/PF_WIN_POS/PF_WIN_SZ. On IVB/HSW
 * the fitter carries a pipe-select field; warn if it points at a
 * different pipe than ours since assignments are never freed.
 */
3785 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3787 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3788 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3791 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3792 if ((ctl & PF_ENABLE) == 0)
3795 crtc_state->pch_pfit.enabled = true;
3797 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3798 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3800 ilk_get_pfit_pos_size(crtc_state, pos, size);
/*
3803 * We currently do not free assignements of panel fitters on
3804 * ivb/hsw (since we don't use the higher upscaling modes which
3805 * differentiates them) so just WARN about this case for now.
 */
3807 drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
3808 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
/*
 * Full hardware state readout for ILK-class pipes: PIPECONF (enable,
 * bpc, color range, colorspace, gamma, delays), CSC mode, color config,
 * PCH state, timings, pipe src size and pfit. Returns whether the pipe
 * is active. NOTE(review): early-bail / break / return lines elided.
 */
3811 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
3812 struct intel_crtc_state *pipe_config)
3814 struct drm_device *dev = crtc->base.dev;
3815 struct drm_i915_private *dev_priv = to_i915(dev);
3816 enum intel_display_power_domain power_domain;
3817 intel_wakeref_t wakeref;
3821 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3822 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3826 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3827 pipe_config->shared_dpll = NULL;
3830 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3831 if (!(tmp & PIPECONF_ENABLE))
3834 switch (tmp & PIPECONF_BPC_MASK) {
3835 case PIPECONF_BPC_6:
3836 pipe_config->pipe_bpp = 18;
3838 case PIPECONF_BPC_8:
3839 pipe_config->pipe_bpp = 24;
3841 case PIPECONF_BPC_10:
3842 pipe_config->pipe_bpp = 30;
3844 case PIPECONF_BPC_12:
3845 pipe_config->pipe_bpp = 36;
3851 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
3852 pipe_config->limited_color_range = true;
3854 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
3855 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
3856 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
3857 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3860 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3864 pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
3866 pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
3868 pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
3870 pipe_config->csc_mode = intel_de_read(dev_priv,
3871 PIPE_CSC_MODE(crtc->pipe));
3873 i9xx_get_pipe_color_config(pipe_config);
3874 intel_color_get_config(pipe_config);
/* No pixel multiplier support on this hardware. */
3876 pipe_config->pixel_multiplier = 1;
3878 ilk_pch_get_config(pipe_config);
3880 intel_get_transcoder_timings(crtc, pipe_config);
3881 intel_get_pipe_src_size(crtc, pipe_config);
3883 ilk_get_pfit_config(pipe_config);
3888 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Bitmask of pipes that can participate in bigjoiner on this platform:
 * A-D on gen12+, B-C on gen11; older platforms return 0 (line elided).
 */
3893 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
3895 if (DISPLAY_VER(i915) >= 12)
3896 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3897 else if (DISPLAY_VER(i915) >= 11)
3898 return BIT(PIPE_B) | BIT(PIPE_C);
/*
 * Check whether a transcoder's DDI function is enabled, reading
 * TRANS_DDI_FUNC_CTL only while its power domain is up (tmp stays 0
 * otherwise, so a powered-down transcoder reports disabled).
 */
3903 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
3904 enum transcoder cpu_transcoder)
3906 enum intel_display_power_domain power_domain;
3907 intel_wakeref_t wakeref;
3910 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3912 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3913 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
3915 return tmp & TRANS_DDI_FUNC_ENABLE;
/*
 * Collect the currently-enabled bigjoiner master and slave pipe masks
 * by probing ICL_PIPE_DSS_CTL1 on every bigjoiner-capable pipe, under
 * the relevant power domains. Gen13+ additionally checks the
 * uncompressed-joiner bits. Warns if the masks are not the expected
 * consecutive master/slave arrangement.
 */
3918 static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
3919 u8 *master_pipes, u8 *slave_pipes)
3921 struct intel_crtc *crtc;
3926 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
3927 bigjoiner_pipes(dev_priv)) {
3928 enum intel_display_power_domain power_domain;
3929 enum pipe pipe = crtc->pipe;
3930 intel_wakeref_t wakeref;
3932 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
3933 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3934 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3936 if (!(tmp & BIG_JOINER_ENABLE))
3939 if (tmp & MASTER_BIG_JOINER_ENABLE)
3940 *master_pipes |= BIT(pipe);
3942 *slave_pipes |= BIT(pipe);
3945 if (DISPLAY_VER(dev_priv) < 13)
3948 power_domain = POWER_DOMAIN_PIPE(pipe);
3949 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3950 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3952 if (tmp & UNCOMPRESSED_JOINER_MASTER)
3953 *master_pipes |= BIT(pipe);
3954 if (tmp & UNCOMPRESSED_JOINER_SLAVE)
3955 *slave_pipes |= BIT(pipe);
3959 /* Bigjoiner pipes should always be consecutive master and slave */
3960 drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
3961 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
3962 *master_pipes, *slave_pipes);
/*
 * For a slave pipe, find its master: the highest-numbered master pipe
 * at or below it. Non-slave pipes return early (line elided).
 */
3965 static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
3967 if ((slave_pipes & BIT(pipe)) == 0)
3970 /* ignore everything above our pipe */
3971 master_pipes &= ~GENMASK(7, pipe);
3973 /* highest remaining bit should be our master pipe */
3974 return fls(master_pipes) - 1;
/*
 * Return the mask of slave pipes belonging to this pipe's bigjoiner
 * group: the slaves between our master pipe and the next master.
 */
3977 static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
3979 enum pipe master_pipe, next_master_pipe;
3981 master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);
3983 if ((master_pipes & BIT(master_pipe)) == 0)
3986 /* ignore our master pipe and everything below it */
3987 master_pipes &= ~GENMASK(master_pipe, 0);
3988 /* make sure a high bit is set for the ffs() */
3989 master_pipes |= BIT(7);
3990 /* lowest remaining bit should be the next master pipe */
3991 next_master_pipe = ffs(master_pipes) - 1;
3993 return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
/*
 * Mask of "panel" transcoders: EDP always, plus the two DSI
 * transcoders on gen11+.
 */
3996 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
3998 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4000 if (DISPLAY_VER(i915) >= 11)
4001 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4003 return panel_transcoder_mask;
/*
 * Build the mask of transcoders currently feeding this CRTC's pipe:
 * panel transcoders (EDP/DSI) whose DDI input selects our pipe, the
 * pipe's own transcoder, and — for a bigjoiner slave — the master
 * pipe's transcoder. NOTE(review): some break lines in the switch are
 * elided in this chunk.
 */
4006 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
4008 struct drm_device *dev = crtc->base.dev;
4009 struct drm_i915_private *dev_priv = to_i915(dev);
4010 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
4011 enum transcoder cpu_transcoder;
4012 u8 master_pipes, slave_pipes;
4013 u8 enabled_transcoders = 0;
/*
4016 * XXX: Do intel_display_power_get_if_enabled before reading this (for
4017 * consistency and less surprising code; it's in always on power).
 */
4019 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
4020 panel_transcoder_mask) {
4021 enum intel_display_power_domain power_domain;
4022 intel_wakeref_t wakeref;
4023 enum pipe trans_pipe;
4026 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4027 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4028 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4030 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
4033 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
4036 "unknown pipe linked to transcoder %s\n",
4037 transcoder_name(cpu_transcoder));
4039 case TRANS_DDI_EDP_INPUT_A_ONOFF:
4040 case TRANS_DDI_EDP_INPUT_A_ON:
4041 trans_pipe = PIPE_A;
4043 case TRANS_DDI_EDP_INPUT_B_ONOFF:
4044 trans_pipe = PIPE_B;
4046 case TRANS_DDI_EDP_INPUT_C_ONOFF:
4047 trans_pipe = PIPE_C;
4049 case TRANS_DDI_EDP_INPUT_D_ONOFF:
4050 trans_pipe = PIPE_D;
4054 if (trans_pipe == crtc->pipe)
4055 enabled_transcoders |= BIT(cpu_transcoder);
4058 /* single pipe or bigjoiner master */
4059 cpu_transcoder = (enum transcoder) crtc->pipe;
4060 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4061 enabled_transcoders |= BIT(cpu_transcoder);
4063 /* bigjoiner slave -> consider the master pipe's transcoder as well */
4064 enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
4065 if (slave_pipes & BIT(crtc->pipe)) {
4066 cpu_transcoder = (enum transcoder)
4067 get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
4068 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4069 enabled_transcoders |= BIT(cpu_transcoder);
4072 return enabled_transcoders;
/* True if the mask contains the EDP transcoder. */
4075 static bool has_edp_transcoders(u8 enabled_transcoders)
4077 return enabled_transcoders & BIT(TRANSCODER_EDP);
/* True if the mask contains either DSI transcoder. */
4080 static bool has_dsi_transcoders(u8 enabled_transcoders)
4082 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4083 BIT(TRANSCODER_DSI_1));
/* True if the mask contains any regular (non-EDP, non-DSI) transcoder. */
4086 static bool has_pipe_transcoders(u8 enabled_transcoders)
4088 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4089 BIT(TRANSCODER_DSI_0) |
4090 BIT(TRANSCODER_DSI_1));
/*
 * Sanity-check an enabled-transcoder mask: only one category
 * (EDP/DSI/pipe) may be present, and only DSI transcoders may be
 * ganged (more than one bit set).
 */
4093 static void assert_enabled_transcoders(struct drm_i915_private *i915,
4094 u8 enabled_transcoders)
4096 /* Only one type of transcoder please */
4097 drm_WARN_ON(&i915->drm,
4098 has_edp_transcoders(enabled_transcoders) +
4099 has_dsi_transcoders(enabled_transcoders) +
4100 has_pipe_transcoders(enabled_transcoders) > 1);
4102 /* Only DSI transcoders can be ganged */
4103 drm_WARN_ON(&i915->drm,
4104 !has_dsi_transcoders(enabled_transcoders) &&
4105 !is_power_of_2(enabled_transcoders));
/*
 * Determine which transcoder drives this CRTC and whether it is
 * enabled. Picks the first enabled transcoder (only DSI may have more
 * than one), grabs its power domain into power_domain_set, detects the
 * EDP force-thru panel-fitter case, and returns the PIPECONF enable
 * bit. NOTE(review): early-return lines are elided in this chunk.
 */
4108 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
4109 struct intel_crtc_state *pipe_config,
4110 struct intel_display_power_domain_set *power_domain_set)
4112 struct drm_device *dev = crtc->base.dev;
4113 struct drm_i915_private *dev_priv = to_i915(dev);
4114 unsigned long enabled_transcoders;
4117 enabled_transcoders = hsw_enabled_transcoders(crtc);
4118 if (!enabled_transcoders)
4121 assert_enabled_transcoders(dev_priv, enabled_transcoders);
/*
4124 * With the exception of DSI we should only ever have
4125 * a single enabled transcoder. With DSI let's just
4126 * pick the first one.
 */
4128 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
4130 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4131 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
4134 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
4135 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4137 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
4138 pipe_config->pch_pfit.force_thru = true;
4141 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
4143 return tmp & PIPECONF_ENABLE;
/*
 * BXT/GLK DSI readout: for each DSI port (A/C), take the transcoder
 * power domain, bail if the DSI PLL is off (register access would hang
 * otherwise), then check DPI enable and the pipe-select field to find
 * the transcoder feeding this CRTC. Returns true if one was found.
 * NOTE(review): continue/break lines inside the loop are elided.
 */
4146 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
4147 struct intel_crtc_state *pipe_config,
4148 struct intel_display_power_domain_set *power_domain_set)
4150 struct drm_device *dev = crtc->base.dev;
4151 struct drm_i915_private *dev_priv = to_i915(dev);
4152 enum transcoder cpu_transcoder;
4156 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
4158 cpu_transcoder = TRANSCODER_DSI_A;
4160 cpu_transcoder = TRANSCODER_DSI_C;
4162 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4163 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
/*
4167 * The PLL needs to be enabled with a valid divider
4168 * configuration, otherwise accessing DSI registers will hang
4169 * the machine. See BSpec North Display Engine
4170 * registers/MIPI[BXT]. We can break out here early, since we
4171 * need the same DSI PLL to be enabled for both DSI ports.
 */
4173 if (!bxt_dsi_pll_is_enabled(dev_priv))
4176 /* XXX: this works for video mode only */
4177 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
4178 if (!(tmp & DPI_ENABLE))
4181 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
4182 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
4185 pipe_config->cpu_transcoder = cpu_transcoder;
4189 return transcoder_is_dsi(pipe_config->cpu_transcoder);
/*
 * Populate crtc_state->bigjoiner_pipes with the full pipe group this
 * CRTC belongs to (master bit plus slave bits); left untouched when
 * the pipe is not part of any bigjoiner configuration.
 */
4192 static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
4194 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4195 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4196 u8 master_pipes, slave_pipes;
4197 enum pipe pipe = crtc->pipe;
4199 enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);
4201 if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
4204 crtc_state->bigjoiner_pipes =
4205 BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
4206 get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
/*
 * Full hardware state readout for HSW+ pipes: transcoder state (DDI or
 * BXT DSI), DSC/bigjoiner, timings, VRR, pipe src size, output format,
 * gamma/CSC, linetime, pfit, IPS, pixel multiplier and framestart
 * delay. All power domains taken along the way are collected in
 * power_domain_set and released together at the end. Returns whether
 * the pipe is active. NOTE(review): early-bail lines elided in chunk.
 */
4209 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
4210 struct intel_crtc_state *pipe_config)
4212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4213 struct intel_display_power_domain_set power_domain_set = { };
4217 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4218 POWER_DOMAIN_PIPE(crtc->pipe)))
4221 pipe_config->shared_dpll = NULL;
4223 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
4225 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4226 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
/* DDI and DSI transcoders must not both claim the pipe. */
4227 drm_WARN_ON(&dev_priv->drm, active);
4234 intel_dsc_get_config(pipe_config);
4235 intel_bigjoiner_get_config(pipe_config);
4237 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
4238 DISPLAY_VER(dev_priv) >= 11)
4239 intel_get_transcoder_timings(crtc, pipe_config);
4241 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
4242 intel_vrr_get_config(crtc, pipe_config);
4244 intel_get_pipe_src_size(crtc, pipe_config);
4246 if (IS_HASWELL(dev_priv)) {
4247 u32 tmp = intel_de_read(dev_priv,
4248 PIPECONF(pipe_config->cpu_transcoder));
4250 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
4251 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4253 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4255 pipe_config->output_format =
4256 bdw_get_pipemisc_output_format(crtc);
4259 pipe_config->gamma_mode = intel_de_read(dev_priv,
4260 GAMMA_MODE(crtc->pipe));
4262 pipe_config->csc_mode = intel_de_read(dev_priv,
4263 PIPE_CSC_MODE(crtc->pipe));
4265 if (DISPLAY_VER(dev_priv) >= 9) {
4266 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
4268 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
4269 pipe_config->gamma_enable = true;
4271 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
4272 pipe_config->csc_enable = true;
4274 i9xx_get_pipe_color_config(pipe_config);
4277 intel_color_get_config(pipe_config);
4279 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
4280 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
4281 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4282 pipe_config->ips_linetime =
4283 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
4285 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4286 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
4287 if (DISPLAY_VER(dev_priv) >= 9)
4288 skl_get_pfit_config(pipe_config);
4290 ilk_get_pfit_config(pipe_config);
4293 hsw_ips_get_config(pipe_config);
4295 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
4296 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4297 pipe_config->pixel_multiplier =
4298 intel_de_read(dev_priv,
4299 PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
4301 pipe_config->pixel_multiplier = 1;
4304 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4305 tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder));
4307 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
4309 /* no idea if this is correct */
4310 pipe_config->framestart_delay = 1;
4314 intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
/*
 * Platform-dispatching wrapper: run the per-platform get_pipe_config
 * vfunc, then mark the state active and derive the remaining software
 * state. Returns false when the pipe is not enabled.
 */
4319 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4321 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4322 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4324 if (!i915->display->get_pipe_config(crtc, crtc_state))
4327 crtc_state->hw.active = true;
4329 intel_crtc_readout_derived_state(crtc_state);
4334 /* VESA 640x480x72Hz mode to set on the pipe */
4335 static const struct drm_display_mode load_detect_mode = {
4336 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
4337 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
/*
 * Detach every plane currently assigned to @crtc in the atomic @state
 * (clear both crtc and fb on each plane state). Returns 0 or a
 * negative errno from the atomic helpers.
 */
4340 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4341 struct drm_crtc *crtc)
4343 struct drm_plane *plane;
4344 struct drm_plane_state *plane_state;
4347 ret = drm_atomic_add_affected_planes(state, crtc);
4351 for_each_new_plane_in_state(state, plane, plane_state, i) {
4352 if (plane_state->crtc != crtc)
4355 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4359 drm_atomic_set_fb_for_plane(plane_state, NULL);
/*
 * Set up a pipe for load detection on @connector (e.g. CRT presence
 * detect): pick a CRTC (the one already bound to the connector, else
 * the first unused one the encoder can drive), build an atomic state
 * that enables the load-detect mode with all planes off, commit it,
 * and stash a duplicated "restore" state in @old for
 * intel_release_load_detect_pipe() to commit later.
 *
 * Returns >0 on success-with-pipe, 0 when no pipe was available, or a
 * negative errno; -EDEADLK is propagated for the caller's modeset
 * backoff/retry loop.
 * NOTE(review): extraction-truncated — several ret checks, goto-fail
 * jumps, braces and returns are elided from this listing.
 */
4365 int intel_get_load_detect_pipe(struct drm_connector *connector,
4366 struct intel_load_detect_pipe *old,
4367 struct drm_modeset_acquire_ctx *ctx)
4369 struct intel_encoder *encoder =
4370 intel_attached_encoder(to_intel_connector(connector));
4371 struct intel_crtc *possible_crtc;
4372 struct intel_crtc *crtc = NULL;
4373 struct drm_device *dev = encoder->base.dev;
4374 struct drm_i915_private *dev_priv = to_i915(dev);
4375 struct drm_mode_config *config = &dev->mode_config;
4376 struct drm_atomic_state *state = NULL, *restore_state = NULL;
4377 struct drm_connector_state *connector_state;
4378 struct intel_crtc_state *crtc_state;
4381 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4382 connector->base.id, connector->name,
4383 encoder->base.base.id, encoder->base.name);
4385 old->restore_state = NULL;
/* Caller must already hold connection_mutex (we peek at connector->state). */
4387 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
4390 * Algorithm gets a little messy:
4392 * - if the connector already has an assigned crtc, use it (but make
4393 * sure it's on first)
4395 * - try to find the first unused crtc that can drive this connector,
4396 * and use that if we find one
4399 /* See if we already have a CRTC for this connector */
4400 if (connector->state->crtc) {
4401 crtc = to_intel_crtc(connector->state->crtc);
4403 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4407 /* Make sure the crtc and connector are running */
4411 /* Find an unused one (if possible) */
4412 for_each_intel_crtc(dev, possible_crtc) {
/* Skip CRTCs this encoder cannot drive. */
4413 if (!(encoder->base.possible_crtcs &
4414 drm_crtc_mask(&possible_crtc->base)))
4417 ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
/* Already-enabled CRTCs are off limits; drop the lock and keep looking. */
4421 if (possible_crtc->base.state->enable) {
4422 drm_modeset_unlock(&possible_crtc->base.mutex);
4426 crtc = possible_crtc;
4431 * If we didn't find an unused CRTC, don't use any.
4434 drm_dbg_kms(&dev_priv->drm,
4435 "no pipe available for load-detect\n");
/* Two states: one to enable load detect, one to restore afterwards. */
4441 state = drm_atomic_state_alloc(dev);
4442 restore_state = drm_atomic_state_alloc(dev);
4443 if (!state || !restore_state) {
4448 state->acquire_ctx = ctx;
4449 restore_state->acquire_ctx = ctx;
4451 connector_state = drm_atomic_get_connector_state(state, connector);
4452 if (IS_ERR(connector_state)) {
4453 ret = PTR_ERR(connector_state);
4457 ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
4461 crtc_state = intel_atomic_get_crtc_state(state, crtc);
4462 if (IS_ERR(crtc_state)) {
4463 ret = PTR_ERR(crtc_state);
4467 crtc_state->uapi.active = true;
4469 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
/* Planes stay off during load detect so they don't disturb the probe. */
4474 ret = intel_modeset_disable_planes(state, &crtc->base);
/* Duplicate the pre-load-detect state so it can be restored later. */
4478 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
4480 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
4482 ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
4484 drm_dbg_kms(&dev_priv->drm,
4485 "Failed to create a copy of old state to restore: %i\n",
4490 ret = drm_atomic_commit(state);
4492 drm_dbg_kms(&dev_priv->drm,
4493 "failed to set mode on load-detect pipe\n");
/* Hand restore_state ownership to the caller via @old. */
4497 old->restore_state = restore_state;
4498 drm_atomic_state_put(state);
4500 /* let the connector get through one full cycle before testing */
4501 intel_crtc_wait_for_next_vblank(crtc);
4507 drm_atomic_state_put(state);
4510 if (restore_state) {
4511 drm_atomic_state_put(restore_state);
4512 restore_state = NULL;
/* -EDEADLK means the caller must back off and retry the whole sequence. */
4515 if (ret == -EDEADLK)
/*
 * Undo intel_get_load_detect_pipe(): commit the duplicated restore
 * state stashed in @old and drop its reference.  Failure is only
 * logged — there is no meaningful recovery at this point.
 * NOTE(review): extraction-truncated — the early return for a NULL
 * restore state and the ret check are elided from this listing.
 */
4521 void intel_release_load_detect_pipe(struct drm_connector *connector,
4522 struct intel_load_detect_pipe *old,
4523 struct drm_modeset_acquire_ctx *ctx)
4525 struct intel_encoder *intel_encoder =
4526 intel_attached_encoder(to_intel_connector(connector));
4527 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4528 struct drm_encoder *encoder = &intel_encoder->base;
4529 struct drm_atomic_state *state = old->restore_state;
4532 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4533 connector->base.id, connector->name,
4534 encoder->base.id, encoder->name);
4539 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4541 drm_dbg_kms(&i915->drm,
4542 "Couldn't release load detect pipe: %i\n", ret);
4543 drm_atomic_state_put(state);
/*
 * Return the DPLL reference clock frequency (kHz) for the programmed
 * DPLL state: the VBT LVDS SSC frequency when spread spectrum input is
 * selected, otherwise a platform-dependent fixed refclk.
 * NOTE(review): extraction-truncated — the literal refclk values
 * returned for the PCH-split / gen / default branches are elided.
 */
4546 static int i9xx_pll_refclk(struct drm_device *dev,
4547 const struct intel_crtc_state *pipe_config)
4549 struct drm_i915_private *dev_priv = to_i915(dev);
4550 u32 dpll = pipe_config->dpll_hw_state.dpll;
4552 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4553 return dev_priv->vbt.lvds_ssc_freq;
4554 else if (HAS_PCH_SPLIT(dev_priv))
4556 else if (DISPLAY_VER(dev_priv) != 2)
4562 /* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Reverse-engineer the port clock from the programmed DPLL/FP register
 * values: decode m1/n/m2 from the active FP register (Pineview uses
 * different field layouts), decode p1/p2 from the DPLL mode bits, then
 * run the forward PLL calculation to recover the clock.  Gen2 parts
 * use the older P1/P2 divide-by bits and the i85x LVDS special case.
 * Result (incl. pixel_multiplier) is stored in pipe_config->port_clock.
 * NOTE(review): extraction-truncated — else branches, literal p2
 * values and closing braces are elided from this listing.
 */
4563 void i9xx_crtc_clock_get(struct intel_crtc *crtc,
4564 struct intel_crtc_state *pipe_config)
4566 struct drm_device *dev = crtc->base.dev;
4567 struct drm_i915_private *dev_priv = to_i915(dev);
4568 u32 dpll = pipe_config->dpll_hw_state.dpll;
4572 int refclk = i9xx_pll_refclk(dev, pipe_config);
/* FPA1 select bit picks which of the two FP dividers is in use. */
4574 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4575 fp = pipe_config->dpll_hw_state.fp0;
4577 fp = pipe_config->dpll_hw_state.fp1;
4579 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
4580 if (IS_PINEVIEW(dev_priv)) {
/* Pineview encodes N as a one-hot bitfield, hence the ffs() decode. */
4581 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
4582 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
4584 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
4585 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
4588 if (DISPLAY_VER(dev_priv) != 2) {
4589 if (IS_PINEVIEW(dev_priv))
4590 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
4591 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
4593 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
4594 DPLL_FPA01_P1_POST_DIV_SHIFT);
4596 switch (dpll & DPLL_MODE_MASK) {
4597 case DPLLB_MODE_DAC_SERIAL:
4598 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
4601 case DPLLB_MODE_LVDS:
4602 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
4606 drm_dbg_kms(&dev_priv->drm,
4607 "Unknown DPLL mode %08x in programmed "
4608 "mode\n", (int)(dpll & DPLL_MODE_MASK));
4612 if (IS_PINEVIEW(dev_priv))
4613 port_clock = pnv_calc_dpll_params(refclk, &clock);
4615 port_clock = i9xx_calc_dpll_params(refclk, &clock);
/* Gen2 path below. */
4617 enum pipe lvds_pipe;
4619 if (IS_I85X(dev_priv) &&
4620 intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
4621 lvds_pipe == crtc->pipe) {
4622 u32 lvds = intel_de_read(dev_priv, LVDS);
4624 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
4625 DPLL_FPA01_P1_POST_DIV_SHIFT);
4627 if (lvds & LVDS_CLKB_POWER_UP)
4632 if (dpll & PLL_P1_DIVIDE_BY_TWO)
4635 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
4636 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
4638 if (dpll & PLL_P2_DIVIDE_BY_4)
4644 port_clock = i9xx_calc_dpll_params(refclk, &clock);
4648 * This value includes pixel_multiplier. We will use
4649 * port_clock to compute adjusted_mode.crtc_clock in the
4650 * encoder's get_config() function.
4652 pipe_config->port_clock = port_clock;
/*
 * Compute the dotclock from the DP link frequency and the link M/N
 * values: dotclock = link_m * link_freq / link_n, done in 64-bit to
 * avoid intermediate overflow/precision loss.
 * NOTE(review): extraction-truncated — the link_n == 0 guard (if any)
 * is not visible in this listing.
 */
4655 int intel_dotclock_calculate(int link_freq,
4656 const struct intel_link_m_n *m_n)
4659 * The calculation for the data clock is:
4660 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4661 * But we want to avoid losing precison if possible, so:
4662 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4664 * and the link clock is simpler:
4665 * link_clock = (m * link_clock) / n
4671 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
4674 /* Returns the currently programmed mode of the given encoder. */
/*
 * Read the hardware state behind @encoder and build a freshly
 * kzalloc'd drm_display_mode from the readout.  Caller owns (and must
 * kfree) the returned mode; NULL when the encoder is off or an
 * allocation/readout step fails (error paths elided by extraction).
 */
4675 struct drm_display_mode *
4676 intel_encoder_current_mode(struct intel_encoder *encoder)
4678 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4679 struct intel_crtc_state *crtc_state;
4680 struct drm_display_mode *mode;
4681 struct intel_crtc *crtc;
4684 if (!encoder->get_hw_state(encoder, &pipe))
4687 crtc = intel_crtc_for_pipe(dev_priv, pipe);
4689 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
/* Temporary crtc_state purely for the readout; freed before return. */
4693 crtc_state = intel_crtc_state_alloc(crtc);
4699 if (!intel_crtc_get_pipe_config(crtc_state)) {
4705 intel_encoder_get_config(encoder, crtc_state);
4707 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
/*
 * Two encoders may share a pipe when each declares the other's type in
 * its cloneable bitmask (or they are the same encoder).
 */
4714 static bool encoders_cloneable(const struct intel_encoder *a,
4715 const struct intel_encoder *b)
4717 /* masks could be asymmetric, so check both ways */
4718 return a == b || (a->cloneable & (1 << b->type) &&
4719 b->cloneable & (1 << a->type));
/*
 * Check that @encoder can be cloned with every other encoder already
 * routed to @crtc in @state — i.e. all pairs are mutually cloneable.
 * NOTE(review): extraction-truncated — the continue/false/true
 * returns are elided from this listing.
 */
4722 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
4723 struct intel_crtc *crtc,
4724 struct intel_encoder *encoder)
4726 struct intel_encoder *source_encoder;
4727 struct drm_connector *connector;
4728 struct drm_connector_state *connector_state;
4731 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4732 if (connector_state->crtc != &crtc->base)
4736 to_intel_encoder(connector_state->best_encoder);
4737 if (!encoders_cloneable(encoder, source_encoder))
/*
 * For every plane in @state that has a planar (NV12 Y/UV) linked
 * partner, pull that partner plane into the atomic state too, so both
 * halves of the pair are always committed together.  The WARNs check
 * that the link is symmetric and master/slave roles differ.
 */
4744 static int icl_add_linked_planes(struct intel_atomic_state *state)
4746 struct intel_plane *plane, *linked;
4747 struct intel_plane_state *plane_state, *linked_plane_state;
4750 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4751 linked = plane_state->planar_linked_plane;
4756 linked_plane_state = intel_atomic_get_plane_state(state, linked);
4757 if (IS_ERR(linked_plane_state))
4758 return PTR_ERR(linked_plane_state);
4760 drm_WARN_ON(state->base.dev,
4761 linked_plane_state->planar_linked_plane != plane);
4762 drm_WARN_ON(state->base.dev,
4763 linked_plane_state->planar_slave == plane_state->planar_slave);
/*
 * ICL+ planar (NV12) support: Y and UV are scanned out by two separate
 * hardware planes.  Tear down all previous Y/UV plane links for this
 * CRTC, then for each plane that needs NV12 assign a free Y-capable
 * plane as its slave, copying the relevant hw state and accounting
 * (active_planes, data_rate, CUS chroma-upsampler routing) over.
 * Returns 0, -EINVAL when no free Y plane is available, or an errno
 * from acquiring a plane state.
 * NOTE(review): extraction-truncated — continue/break/return lines and
 * closing braces are elided from this listing.
 */
4769 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
4771 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4772 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4773 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
4774 struct intel_plane *plane, *linked;
4775 struct intel_plane_state *plane_state;
4778 if (DISPLAY_VER(dev_priv) < 11)
4782 * Destroy all old plane links and make the slave plane invisible
4783 * in the crtc_state->active_planes mask.
4785 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4786 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
4789 plane_state->planar_linked_plane = NULL;
4790 if (plane_state->planar_slave && !plane_state->uapi.visible) {
4791 crtc_state->enabled_planes &= ~BIT(plane->id);
4792 crtc_state->active_planes &= ~BIT(plane->id);
4793 crtc_state->update_planes |= BIT(plane->id);
4794 crtc_state->data_rate[plane->id] = 0;
4795 crtc_state->rel_data_rate[plane->id] = 0;
4798 plane_state->planar_slave = false;
/* Nothing more to do when no plane needs NV12 on this CRTC. */
4801 if (!crtc_state->nv12_planes)
4804 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4805 struct intel_plane_state *linked_state = NULL;
4807 if (plane->pipe != crtc->pipe ||
4808 !(crtc_state->nv12_planes & BIT(plane->id)))
/* Find a Y-capable plane that is not already in use. */
4811 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
4812 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
4815 if (crtc_state->active_planes & BIT(linked->id))
4818 linked_state = intel_atomic_get_plane_state(state, linked);
4819 if (IS_ERR(linked_state))
4820 return PTR_ERR(linked_state);
4825 if (!linked_state) {
4826 drm_dbg_kms(&dev_priv->drm,
4827 "Need %d free Y planes for planar YUV\n",
4828 hweight8(crtc_state->nv12_planes));
/* Wire up the pair and account the slave into the CRTC masks/rates. */
4833 plane_state->planar_linked_plane = linked;
4835 linked_state->planar_slave = true;
4836 linked_state->planar_linked_plane = plane;
4837 crtc_state->enabled_planes |= BIT(linked->id);
4838 crtc_state->active_planes |= BIT(linked->id);
4839 crtc_state->update_planes |= BIT(linked->id);
4840 crtc_state->data_rate[linked->id] =
4841 crtc_state->data_rate_y[plane->id];
4842 crtc_state->rel_data_rate[linked->id] =
4843 crtc_state->rel_data_rate_y[plane->id];
4844 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
4845 linked->base.name, plane->base.name);
4847 /* Copy parameters to slave plane */
4848 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
4849 linked_state->color_ctl = plane_state->color_ctl;
4850 linked_state->view = plane_state->view;
4851 linked_state->decrypt = plane_state->decrypt;
4853 intel_plane_copy_hw_state(linked_state, plane_state);
4854 linked_state->uapi.src = plane_state->uapi.src;
4855 linked_state->uapi.dst = plane_state->uapi.dst;
/* HDR planes route chroma upsampling via CUS; pick the register value per slave. */
4857 if (icl_is_hdr_plane(dev_priv, plane->id)) {
4858 if (linked->id == PLANE_SPRITE5)
4859 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
4860 else if (linked->id == PLANE_SPRITE4)
4861 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
4862 else if (linked->id == PLANE_SPRITE3)
4863 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
4864 else if (linked->id == PLANE_SPRITE2)
4865 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
4867 MISSING_CASE(linked->id);
/*
 * True when the set of C8-format planes changed between empty and
 * non-empty (the !x != !y idiom compares only the booleans) — which is
 * when the pipe gamma enables may need updating.
 */
4874 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
4876 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
4877 struct intel_atomic_state *state =
4878 to_intel_atomic_state(new_crtc_state->uapi.state);
4879 const struct intel_crtc_state *old_crtc_state =
4880 intel_atomic_get_old_crtc_state(state, crtc);
4882 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
/*
 * HSW/BDW linetime watermark: time for one scanline in 1/8 us units
 * (htotal * 1000 * 8 / pixel clock in kHz), clamped to the 9-bit
 * register field (0x1ff).  Zero when the pipe is disabled (the elided
 * early-return path).
 */
4885 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4887 const struct drm_display_mode *pipe_mode =
4888 &crtc_state->hw.pipe_mode;
4891 if (!crtc_state->hw.enable)
4894 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4895 pipe_mode->crtc_clock);
4897 return min(linetime_wm, 0x1ff);
/*
 * Same linetime computation as hsw_linetime_wm() but measured against
 * the logical CDCLK instead of the pixel clock — used while IPS is
 * enabled.  Clamped to the 9-bit field.
 */
4900 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4901 const struct intel_cdclk_state *cdclk_state)
4903 const struct drm_display_mode *pipe_mode =
4904 &crtc_state->hw.pipe_mode;
4907 if (!crtc_state->hw.enable)
4910 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4911 cdclk_state->logical.cdclk)&#59;
4913 return min(linetime_wm, 0x1ff);
/*
 * SKL+ linetime watermark: like hsw_linetime_wm() but rounded up and
 * based on the adjusted pixel_rate.  Applies Display WA #1135 on
 * BXT/GLK with IPC enabled (adjustment line elided by extraction),
 * then clamps to the 9-bit field.
 */
4916 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4918 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4919 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4920 const struct drm_display_mode *pipe_mode =
4921 &crtc_state->hw.pipe_mode;
4924 if (!crtc_state->hw.enable)
4927 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4928 crtc_state->pixel_rate);
4930 /* Display WA #1135: BXT:ALL GLK:ALL */
4931 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4932 dev_priv->ipc_enabled)
4935 return min(linetime_wm, 0x1ff);
/*
 * Fill crtc_state->linetime (SKL+ vs HSW/BDW variant) and, on CRTCs
 * that support IPS, crtc_state->ips_linetime from the CDCLK state.
 * Returns 0 or the errno from acquiring the cdclk state.
 */
4938 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4939 struct intel_crtc *crtc)
4941 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4942 struct intel_crtc_state *crtc_state =
4943 intel_atomic_get_new_crtc_state(state, crtc);
4944 const struct intel_cdclk_state *cdclk_state;
4946 if (DISPLAY_VER(dev_priv) >= 9)
4947 crtc_state->linetime = skl_linetime_wm(crtc_state);
4949 crtc_state->linetime = hsw_linetime_wm(crtc_state);
4951 if (!hsw_crtc_supports_ips(crtc))
4954 cdclk_state = intel_atomic_get_cdclk_state(state);
4955 if (IS_ERR(cdclk_state))
4956 return PTR_ERR(cdclk_state);
4958 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
/*
 * Per-CRTC atomic check: validate and derive per-pipe state for the
 * new crtc_state — clock/DPLL computation on modeset, color management
 * re-check, pipe + intermediate watermarks, scalers (gen9+), IPS,
 * linetime watermarks and PSR2 selective fetch.  Returns 0 or the
 * first failing sub-check's errno.
 * NOTE(review): extraction-truncated — several "if (ret) return ret;"
 * lines are elided from this listing.
 */
4964 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
4965 struct intel_crtc *crtc)
4967 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4968 struct intel_crtc_state *crtc_state =
4969 intel_atomic_get_new_crtc_state(state, crtc);
4970 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
/* Old platforms need a post-disable WM update when turning the pipe off. */
4973 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
4974 mode_changed && !crtc_state->hw.active)
4975 crtc_state->update_wm_post = true;
4977 if (mode_changed && crtc_state->hw.enable &&
4978 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
4979 ret = intel_dpll_crtc_compute_clock(crtc_state);
4985 * May need to update pipe gamma enable bits
4986 * when C8 planes are getting enabled/disabled.
4988 if (c8_planes_changed(crtc_state))
4989 crtc_state->uapi.color_mgmt_changed = true;
4991 if (mode_changed || crtc_state->update_pipe ||
4992 crtc_state->uapi.color_mgmt_changed) {
4993 ret = intel_color_check(crtc_state);
4998 ret = intel_compute_pipe_wm(state, crtc);
5000 drm_dbg_kms(&dev_priv->drm,
5001 "Target pipe watermarks are invalid\n");
5006 * Calculate 'intermediate' watermarks that satisfy both the
5007 * old state and the new state. We can program these
5010 ret = intel_compute_intermediate_wm(state, crtc);
5012 drm_dbg_kms(&dev_priv->drm,
5013 "No valid intermediate pipe watermarks are possible\n");
5017 if (DISPLAY_VER(dev_priv) >= 9) {
5018 if (mode_changed || crtc_state->update_pipe) {
5019 ret = skl_update_scaler_crtc(crtc_state);
5024 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
5029 if (HAS_IPS(dev_priv)) {
5030 ret = hsw_ips_compute_config(state, crtc);
5035 if (DISPLAY_VER(dev_priv) >= 9 ||
5036 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
5037 ret = hsw_compute_linetime_wm(state, crtc);
5043 ret = intel_psr2_sel_fetch_update(state, crtc);
/*
 * After hardware state takeover (boot/resume), synchronise every
 * connector's atomic state with the encoder/CRTC routing that was read
 * out: set best_encoder/crtc/max_bpc for bound connectors (taking a
 * connector reference) and clear them otherwise.  The initial put
 * balances a reference for a previously-bound connector.
 * NOTE(review): extraction-truncated — the if/else structure between
 * the put and the rebinding is partially elided here.
 */
5050 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
5052 struct intel_connector *connector;
5053 struct drm_connector_list_iter conn_iter;
5055 drm_connector_list_iter_begin(dev, &conn_iter);
5056 for_each_intel_connector_iter(connector, &conn_iter) {
5057 struct drm_connector_state *conn_state = connector->base.state;
5058 struct intel_encoder *encoder =
5059 to_intel_encoder(connector->base.encoder);
5061 if (conn_state->crtc)
5062 drm_connector_put(&connector->base);
5065 struct intel_crtc *crtc =
5066 to_intel_crtc(encoder->base.crtc);
5067 const struct intel_crtc_state *crtc_state =
5068 to_intel_crtc_state(crtc->base.state);
5070 conn_state->best_encoder = &encoder->base;
5071 conn_state->crtc = &crtc->base;
/* pipe_bpp is per-pipe; a third of it gives the per-component max_bpc (default 24bpp -> 8bpc). */
5072 conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
5074 drm_connector_get(&connector->base);
5076 conn_state->best_encoder = NULL;
5077 conn_state->crtc = NULL;
5080 drm_connector_list_iter_end(&conn_iter);
/*
 * Clamp pipe_config->pipe_bpp to what this sink supports: map the
 * connector's max_bpc to a pipe bpp (case bodies elided by extraction)
 * and lower pipe_bpp if the sink's limit is smaller, logging the
 * clamp.  Returns 0 on success (error return elided).
 */
5084 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
5085 struct intel_crtc_state *pipe_config)
5087 struct drm_connector *connector = conn_state->connector;
5088 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5089 const struct drm_display_info *info = &connector->display_info;
5092 switch (conn_state->max_bpc) {
5106 MISSING_CASE(conn_state->max_bpc);
5110 if (bpp < pipe_config->pipe_bpp) {
5111 drm_dbg_kms(&i915->drm,
5112 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
5113 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
5114 connector->base.id, connector->name,
5116 3 * conn_state->max_requested_bpc,
5117 pipe_config->pipe_bpp);
5119 pipe_config->pipe_bpp = bpp;
/*
 * Pick the platform's maximum pipe bpp as a starting point (literal
 * values elided by extraction: G4X/VLV/CHV, gen5+, and older each get
 * a different cap), then clamp it against every connector bound to
 * @crtc in this state via compute_sink_pipe_bpp().
 */
5126 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5127 struct intel_crtc_state *pipe_config)
5129 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5130 struct drm_atomic_state *state = pipe_config->uapi.state;
5131 struct drm_connector *connector;
5132 struct drm_connector_state *connector_state;
5135 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5136 IS_CHERRYVIEW(dev_priv)))
5138 else if (DISPLAY_VER(dev_priv) >= 5)
5143 pipe_config->pipe_bpp = bpp;
5145 /* Clamp display bpp to connector max bpp */
5146 for_each_new_connector_in_state(state, connector, connector_state, i) {
5149 if (connector_state->crtc != &crtc->base)
5152 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
/* Debug-log the raw CRTC timing fields (h/v display, sync, total) of @mode. */
5160 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
5161 const struct drm_display_mode *mode)
5163 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
5164 "type: 0x%x flags: 0x%x\n",
5166 mode->crtc_hdisplay, mode->crtc_hsync_start,
5167 mode->crtc_hsync_end, mode->crtc_htotal,
5168 mode->crtc_vdisplay, mode->crtc_vsync_start,
5169 mode->crtc_vsync_end, mode->crtc_vtotal,
5170 mode->type, mode->flags);
/* Debug-log one link M/N configuration, tagged with @id (e.g. "fdi", "dp m_n"). */
5174 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
5175 const char *id, unsigned int lane_count,
5176 const struct intel_link_m_n *m_n)
5178 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5180 drm_dbg_kms(&i915->drm,
5181 "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
5183 m_n->data_m, m_n->data_n,
5184 m_n->link_m, m_n->link_n, m_n->tu);
/* Log an HDMI infoframe at KMS debug level; skip the formatting work when KMS debugging is off. */
5188 intel_dump_infoframe(struct drm_i915_private *dev_priv,
5189 const union hdmi_infoframe *frame)
5191 if (!drm_debug_enabled(DRM_UT_KMS))
5194 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
/* Log a DP VSC SDP at KMS debug level; skip the formatting work when KMS debugging is off. */
5198 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
5199 const struct drm_dp_vsc_sdp *vsc)
5201 if (!drm_debug_enabled(DRM_UT_KMS))
5204 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
/* Map INTEL_OUTPUT_* enum values to their names for debug output
 * (several entries elided from this listing by extraction). */
5207 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
5209 static const char * const output_type_str[] = {
5210 OUTPUT_TYPE(UNUSED),
5211 OUTPUT_TYPE(ANALOG),
5221 OUTPUT_TYPE(DP_MST),
/*
 * Render @output_types (a bitmask of INTEL_OUTPUT_*) into @buf as a
 * comma-separated list of names, consuming bits as they are printed;
 * any bit left over has no known name (hence the WARN).
 * NOTE(review): extraction-truncated — buffer-advance and truncation
 * handling between the snprintf and the mask clear are elided.
 */
5226 static void snprintf_output_types(char *buf, size_t len,
5227 unsigned int output_types)
5234 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
5237 if ((output_types & BIT(i)) == 0)
5240 r = snprintf(str, len, "%s%s",
5241 str != buf ? "," : "", output_type_str[i]);
5247 output_types &= ~BIT(i);
5250 WARN_ON_ONCE(output_types != 0);
/* Human-readable names for intel_output_format values. */
5253 static const char * const output_format_str[] = {
5254 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
5255 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
5256 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
/* Bounds-checked lookup; the out-of-range fallback string is elided by extraction. */
5259 static const char *output_formats(enum intel_output_format format)
5261 if (format >= ARRAY_SIZE(output_format_str))
5263 return output_format_str[format];
/*
 * Debug-log one plane's state: framebuffer id/size/format/modifier (or
 * [NOFB]), visibility, rotation, scaler assignment, and — when visible
 * — the src (16.16 fixed point) and dst rectangles.
 */
5266 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
5268 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5269 struct drm_i915_private *i915 = to_i915(plane->base.dev);
5270 const struct drm_framebuffer *fb = plane_state->hw.fb;
5273 drm_dbg_kms(&i915->drm,
5274 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
5275 plane->base.base.id, plane->base.name,
5276 str_yes_no(plane_state->uapi.visible));
5280 drm_dbg_kms(&i915->drm,
5281 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
5282 plane->base.base.id, plane->base.name,
5283 fb->base.id, fb->width, fb->height, &fb->format->format,
5284 fb->modifier, str_yes_no(plane_state->uapi.visible));
5285 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
5286 plane_state->hw.rotation, plane_state->scaler_id);
5287 if (plane_state->uapi.visible)
5288 drm_dbg_kms(&i915->drm,
5289 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
5290 DRM_RECT_FP_ARG(&plane_state->uapi.src),
5291 DRM_RECT_ARG(&plane_state->uapi.dst));
/*
 * Dump the whole crtc_state to the KMS debug log — used when a state
 * mismatch or a modeset is being diagnosed.  Logs enable/active,
 * output types/format, transcoders (incl. MST master and port-sync),
 * bigjoiner/splitter, FDI and DP M/N values, infoframes, VRR, modes
 * and timings, pfit, DPLL hw state, color management, and finally
 * every plane on this CRTC present in @state.  @context tags the log.
 */
5294 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
5295 struct intel_atomic_state *state,
5296 const char *context)
5298 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5299 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5300 const struct intel_plane_state *plane_state;
5301 struct intel_plane *plane;
5305 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
5306 crtc->base.base.id, crtc->base.name,
5307 str_yes_no(pipe_config->hw.enable), context);
/* Nothing else worth dumping for a disabled pipe. */
5309 if (!pipe_config->hw.enable)
5312 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
5313 drm_dbg_kms(&dev_priv->drm,
5314 "active: %s, output_types: %s (0x%x), output format: %s\n",
5315 str_yes_no(pipe_config->hw.active),
5316 buf, pipe_config->output_types,
5317 output_formats(pipe_config->output_format));
5319 drm_dbg_kms(&dev_priv->drm,
5320 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
5321 transcoder_name(pipe_config->cpu_transcoder),
5322 pipe_config->pipe_bpp, pipe_config->dither);
5324 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
5325 transcoder_name(pipe_config->mst_master_transcoder));
5327 drm_dbg_kms(&dev_priv->drm,
5328 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
5329 transcoder_name(pipe_config->master_transcoder),
5330 pipe_config->sync_mode_slaves_mask);
5332 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n",
5333 intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
5334 intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
5335 pipe_config->bigjoiner_pipes);
5337 drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
5338 str_enabled_disabled(pipe_config->splitter.enable),
5339 pipe_config->splitter.link_count,
5340 pipe_config->splitter.pixel_overlap);
5342 if (pipe_config->has_pch_encoder)
5343 intel_dump_m_n_config(pipe_config, "fdi",
5344 pipe_config->fdi_lanes,
5345 &pipe_config->fdi_m_n);
5347 if (intel_crtc_has_dp_encoder(pipe_config)) {
5348 intel_dump_m_n_config(pipe_config, "dp m_n",
5349 pipe_config->lane_count,
5350 &pipe_config->dp_m_n);
5351 intel_dump_m_n_config(pipe_config, "dp m2_n2",
5352 pipe_config->lane_count,
5353 &pipe_config->dp_m2_n2);
5356 drm_dbg_kms(&dev_priv->drm, "framestart delay: %d, MSA timing delay: %d\n",
5357 pipe_config->framestart_delay, pipe_config->msa_timing_delay);
5359 drm_dbg_kms(&dev_priv->drm,
5360 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
5361 pipe_config->has_audio, pipe_config->has_infoframe,
5362 pipe_config->infoframes.enable);
5364 if (pipe_config->infoframes.enable &
5365 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
5366 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
5367 pipe_config->infoframes.gcp)&#59;
5368 if (pipe_config->infoframes.enable &
5369 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
5370 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
5371 if (pipe_config->infoframes.enable &
5372 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
5373 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
5374 if (pipe_config->infoframes.enable &
5375 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
5376 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
5377 if (pipe_config->infoframes.enable &
5378 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
5379 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
/* GMP shares the DRM infoframe storage, so dumping .drm here is intentional. */
5380 if (pipe_config->infoframes.enable &
5381 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
5382 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
5383 if (pipe_config->infoframes.enable &
5384 intel_hdmi_infoframe_enable(DP_SDP_VSC))
5385 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
5387 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
5388 str_yes_no(pipe_config->vrr.enable),
5389 pipe_config->vrr.vmin, pipe_config->vrr.vmax,
5390 pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
5391 pipe_config->vrr.flipline,
5392 intel_vrr_vmin_vblank_start(pipe_config),
5393 intel_vrr_vmax_vblank_start(pipe_config));
5395 drm_dbg_kms(&dev_priv->drm, "requested mode: " DRM_MODE_FMT "\n",
5396 DRM_MODE_ARG(&pipe_config->hw.mode));
5397 drm_dbg_kms(&dev_priv->drm, "adjusted mode: " DRM_MODE_FMT "\n",
5398 DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
5399 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
5400 drm_dbg_kms(&dev_priv->drm, "pipe mode: " DRM_MODE_FMT "\n",
5401 DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
5402 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
5403 drm_dbg_kms(&dev_priv->drm,
5404 "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
5405 pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
5406 pipe_config->pixel_rate);
5408 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
5409 pipe_config->linetime, pipe_config->ips_linetime);
5411 if (DISPLAY_VER(dev_priv) >= 9)
5412 drm_dbg_kms(&dev_priv->drm,
5413 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
5415 pipe_config->scaler_state.scaler_users,
5416 pipe_config->scaler_state.scaler_id);
5418 if (HAS_GMCH(dev_priv))
5419 drm_dbg_kms(&dev_priv->drm,
5420 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
5421 pipe_config->gmch_pfit.control,
5422 pipe_config->gmch_pfit.pgm_ratios,
5423 pipe_config->gmch_pfit.lvds_border_bits);
5425 drm_dbg_kms(&dev_priv->drm,
5426 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
5427 DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
5428 str_enabled_disabled(pipe_config->pch_pfit.enabled),
5429 str_yes_no(pipe_config->pch_pfit.force_thru));
5431 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i, drrs: %i\n",
5432 pipe_config->ips_enabled, pipe_config->double_wide,
5433 pipe_config->has_drrs);
5435 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
/* CHV has a CGM block instead of the standard CSC, hence the split. */
5437 if (IS_CHERRYVIEW(dev_priv))
5438 drm_dbg_kms(&dev_priv->drm,
5439 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5440 pipe_config->cgm_mode, pipe_config->gamma_mode,
5441 pipe_config->gamma_enable, pipe_config->csc_enable);
5443 drm_dbg_kms(&dev_priv->drm,
5444 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5445 pipe_config->csc_mode, pipe_config->gamma_mode,
5446 pipe_config->gamma_enable, pipe_config->csc_enable);
5448 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
5449 pipe_config->hw.degamma_lut ?
5450 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
5451 pipe_config->hw.gamma_lut ?
5452 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
5458 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5459 if (plane->pipe == crtc->pipe)
5460 intel_dump_plane_state(plane_state);
/*
 * Validate that the new state does not use the same digital port
 * twice: each DP/HDMI/eDP port may appear at most once, and MST and
 * SST/HDMI may not be mixed on one port.  Walks the connector list
 * (one encoder per digital port on DDI platforms, so the encoder list
 * would hide conflicts).  Returns true when there is no conflict.
 * NOTE(review): extraction-truncated — the "ret = false"/break lines
 * and mst-port accounting are partially elided from this listing.
 */
5464 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
5466 struct drm_device *dev = state->base.dev;
5467 struct drm_connector *connector;
5468 struct drm_connector_list_iter conn_iter;
5469 unsigned int used_ports = 0;
5470 unsigned int used_mst_ports = 0;
5474 * We're going to peek into connector->state,
5475 * hence connection_mutex must be held.
5477 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5480 * Walk the connector list instead of the encoder
5481 * list to detect the problem on ddi platforms
5482 * where there's just one encoder per digital port.
5484 drm_connector_list_iter_begin(dev, &conn_iter);
5485 drm_for_each_connector_iter(connector, &conn_iter) {
5486 struct drm_connector_state *connector_state;
5487 struct intel_encoder *encoder;
/* Use the new state if this connector is in @state, else its current state. */
5490 drm_atomic_get_new_connector_state(&state->base,
5492 if (!connector_state)
5493 connector_state = connector->state;
5495 if (!connector_state->best_encoder)
5498 encoder = to_intel_encoder(connector_state->best_encoder);
5500 drm_WARN_ON(dev, !connector_state->crtc);
5502 switch (encoder->type) {
5503 case INTEL_OUTPUT_DDI:
5504 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5507 case INTEL_OUTPUT_DP:
5508 case INTEL_OUTPUT_HDMI:
5509 case INTEL_OUTPUT_EDP:
5510 /* the same port mustn't appear more than once */
5511 if (used_ports & BIT(encoder->port))
5514 used_ports |= BIT(encoder->port);
5516 case INTEL_OUTPUT_DP_MST:
5524 drm_connector_list_iter_end(&conn_iter);
5526 /* can't mix MST and SST/HDMI on the same port */
5527 if (used_ports & used_mst_ports)
/*
 * Fast-path (non-modeset) uapi -> hw state copy: only the color
 * management blobs can change without a full modeset.  Bigjoiner
 * slaves get their hw state from the master instead, hence the WARN.
 */
5534 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
5535 struct intel_crtc *crtc)
5537 struct intel_crtc_state *crtc_state =
5538 intel_atomic_get_new_crtc_state(state, crtc);
5540 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
5542 drm_property_replace_blob(&crtc_state->hw.degamma_lut,
5543 crtc_state->uapi.degamma_lut);
5544 drm_property_replace_blob(&crtc_state->hw.gamma_lut,
5545 crtc_state->uapi.gamma_lut);
5546 drm_property_replace_blob(&crtc_state->hw.ctm,
5547 crtc_state->uapi.ctm);
/*
 * Full-modeset uapi -> hw state copy: enable/active flags, requested
 * and adjusted modes and scaling filter, plus everything the
 * nomodeset variant copies (color blobs).  Not used for bigjoiner
 * slaves, which mirror their master's hw state.
 */
5551 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
5552 struct intel_crtc *crtc)
5554 struct intel_crtc_state *crtc_state =
5555 intel_atomic_get_new_crtc_state(state, crtc);
5557 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
5559 crtc_state->hw.enable = crtc_state->uapi.enable;
5560 crtc_state->hw.active = crtc_state->uapi.active;
5561 drm_mode_copy(&crtc_state->hw.mode,
5562 &crtc_state->uapi.mode);
5563 drm_mode_copy(&crtc_state->hw.adjusted_mode,
5564 &crtc_state->uapi.adjusted_mode);
5565 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5567 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
/*
 * Reverse copy (hw -> uapi), used after hardware readout so userspace
 * sees the real state.  Skipped for bigjoiner slaves: their uapi state
 * must keep reflecting what userspace asked for, not the mirrored hw
 * state (early return elided by extraction).
 */
5570 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
5572 if (intel_crtc_is_bigjoiner_slave(crtc_state))
5575 crtc_state->uapi.enable = crtc_state->hw.enable;
5576 crtc_state->uapi.active = crtc_state->hw.active;
5577 drm_WARN_ON(crtc_state->uapi.crtc->dev,
5578 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
5580 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
5581 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
5583 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
5584 crtc_state->hw.degamma_lut);
5585 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
5586 crtc_state->hw.gamma_lut);
5587 drm_property_replace_blob(&crtc_state->uapi.ctm,
5588 crtc_state->hw.ctm);
/*
 * For a non-modeset update of a bigjoiner slave crtc: mirror the master's
 * hw color-management blobs and its color_mgmt_changed flag into the
 * slave's state, so both pipes are programmed identically.
 */
5592 copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
5593 struct intel_crtc *slave_crtc)
5595 struct intel_crtc_state *slave_crtc_state =
5596 intel_atomic_get_new_crtc_state(state, slave_crtc);
5597 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
5598 const struct intel_crtc_state *master_crtc_state =
5599 intel_atomic_get_new_crtc_state(state, master_crtc);
5601 drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
5602 master_crtc_state->hw.degamma_lut);
5603 drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
5604 master_crtc_state->hw.gamma_lut);
5605 drm_property_replace_blob(&slave_crtc_state->hw.ctm,
5606 master_crtc_state->hw.ctm);
5608 slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
/*
 * Full-modeset bigjoiner slave state copy: duplicate the master's entire
 * crtc state over the slave's, while preserving the handful of fields that
 * are genuinely per-pipe (uapi state, scaler state, DPLL selection/state,
 * CRC enable), then rebuild the slave's hw state from the master's.
 */
5612 copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
5613 struct intel_crtc *slave_crtc)
5615 struct intel_crtc_state *slave_crtc_state =
5616 intel_atomic_get_new_crtc_state(state, slave_crtc);
5617 struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
5618 const struct intel_crtc_state *master_crtc_state =
5619 intel_atomic_get_new_crtc_state(state, master_crtc);
5620 struct intel_crtc_state *saved_state;
/* master and slave must agree on the bigjoiner pipe set (checked again below) */
5622 WARN_ON(master_crtc_state->bigjoiner_pipes !=
5623 slave_crtc_state->bigjoiner_pipes);
5625 saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
/* NOTE(review): the kmemdup() NULL check appears to have been dropped by
 * the extraction (original lines 5626-5628 missing) — verify upstream. */
5629 /* preserve some things from the slave's original crtc state */
5630 saved_state->uapi = slave_crtc_state->uapi;
5631 saved_state->scaler_state = slave_crtc_state->scaler_state;
5632 saved_state->shared_dpll = slave_crtc_state->shared_dpll;
5633 saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
5634 saved_state->crc_enabled = slave_crtc_state->crc_enabled;
/* free the slave's old blob references before they are overwritten */
5636 intel_crtc_free_hw_state(slave_crtc_state);
5637 memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
5640 /* Re-init hw state */
5641 memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
5642 slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
5643 slave_crtc_state->hw.active = master_crtc_state->hw.active;
5644 drm_mode_copy(&slave_crtc_state->hw.mode,
5645 &master_crtc_state->hw.mode);
5646 drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
5647 &master_crtc_state->hw.pipe_mode);
5648 drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
5649 &master_crtc_state->hw.adjusted_mode);
5650 slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
/* color blobs + color_mgmt_changed come from the nomodeset helper */
5652 copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
5654 slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
5655 slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
5656 slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
5658 WARN_ON(master_crtc_state->bigjoiner_pipes !=
5659 slave_crtc_state->bigjoiner_pipes);
/*
 * Reset a crtc state to a freshly-allocated ("cleared") state before a
 * modeset computation, preserving only the fields that are known to survive
 * a modeset (uapi state, scaler state, DPLL bits, CRC enable, and — on
 * G4X/VLV/CHV — the watermarks).  Finishes by re-deriving the hw state
 * from the uapi state.
 */
5665 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
5666 struct intel_crtc *crtc)
5668 struct intel_crtc_state *crtc_state =
5669 intel_atomic_get_new_crtc_state(state, crtc);
5670 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5671 struct intel_crtc_state *saved_state;
5673 saved_state = intel_crtc_state_alloc(crtc);
/* NOTE(review): the allocation NULL check appears to have been dropped by
 * the extraction (original lines 5674-5676 missing) — verify upstream. */
5677 /* free the old crtc_state->hw members */
5678 intel_crtc_free_hw_state(crtc_state);
5680 /* FIXME: before the switch to atomic started, a new pipe_config was
5681 * kzalloc'd. Code that depends on any field being zero should be
5682 * fixed, so that the crtc_state can be safely duplicated. For now,
5683 * only fields that are know to not cause problems are preserved. */
5685 saved_state->uapi = crtc_state->uapi;
5686 saved_state->scaler_state = crtc_state->scaler_state;
5687 saved_state->shared_dpll = crtc_state->shared_dpll;
5688 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5689 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
5690 sizeof(saved_state->icl_port_dplls));
5691 saved_state->crc_enabled = crtc_state->crc_enabled;
/* pre-skl platforms track watermarks in the crtc state */
5692 if (IS_G4X(dev_priv) ||
5693 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5694 saved_state->wm = crtc_state->wm;
5696 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
/* NOTE(review): the kfree(saved_state) that upstream has here (original
 * line ~5697) is missing from this extraction — verify before compiling. */
5699 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
/*
 * Compute the pipe configuration for a modeset: sanitize sync flags, pick a
 * baseline bpp, derive the pipe source size from the requested mode, let
 * every encoder on this crtc adjust the config via .compute_config(), then
 * compute the crtc config itself — retrying once on -EAGAIN (bandwidth
 * constrained).  Returns 0 on success or a negative error code.
 *
 * NOTE(review): this chunk was extracted with some lines dropped (e.g. the
 * encoder_retry: label and several return statements are missing) — compare
 * against upstream before compiling.
 */
5705 intel_modeset_pipe_config(struct intel_atomic_state *state,
5706 struct intel_crtc_state *pipe_config)
5708 struct drm_crtc *crtc = pipe_config->uapi.crtc;
5709 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5710 struct drm_connector *connector;
5711 struct drm_connector_state *connector_state;
5712 int pipe_src_w, pipe_src_h;
5713 int base_bpp, ret, i;
/* default the transcoder to the pipe; encoders may override (e.g. eDP) */
5716 pipe_config->cpu_transcoder =
5717 (enum transcoder) to_intel_crtc(crtc)->pipe;
5719 pipe_config->framestart_delay = 1;
5722 * Sanitize sync polarity flags based on requested ones. If neither
5723 * positive or negative polarity is requested, treat this as meaning
5724 * negative polarity.
5726 if (!(pipe_config->hw.adjusted_mode.flags &
5727 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5728 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5730 if (!(pipe_config->hw.adjusted_mode.flags &
5731 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5732 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5734 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
/* remember the pre-encoder bpp for the debug message at the end */
5739 base_bpp = pipe_config->pipe_bpp;
5742 * Determine the real pipe dimensions. Note that stereo modes can
5743 * increase the actual pipe size due to the frame doubling and
5744 * insertion of additional space for blanks between the frame. This
5745 * is stored in the crtc timings. We use the requested mode to do this
5746 * computation to clearly distinguish it from the adjusted mode, which
5747 * can be changed by the connectors in the below retry loop.
5749 drm_mode_get_hv_timing(&pipe_config->hw.mode,
5750 &pipe_src_w, &pipe_src_h);
5751 drm_rect_init(&pipe_config->pipe_src, 0, 0,
5752 pipe_src_w, pipe_src_h);
/* first pass: validate cloning and collect output_types */
5754 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5755 struct intel_encoder *encoder =
5756 to_intel_encoder(connector_state->best_encoder);
5758 if (connector_state->crtc != crtc)
5761 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
5762 drm_dbg_kms(&i915->drm,
5763 "rejecting invalid cloning configuration\n");
5768 * Determine output_types before calling the .compute_config()
5769 * hooks so that the hooks can use this information safely.
5771 if (encoder->compute_output_type)
5772 pipe_config->output_types |=
5773 BIT(encoder->compute_output_type(encoder, pipe_config,
5776 pipe_config->output_types |= BIT(encoder->type);
/* NOTE(review): upstream has an "encoder_retry:" label here (original
 * line ~5779) that the -EAGAIN path below jumps back to — it was dropped
 * by the extraction. */
5780 /* Ensure the port clock defaults are reset when retrying. */
5781 pipe_config->port_clock = 0;
5782 pipe_config->pixel_multiplier = 1;
5784 /* Fill in default crtc timings, allow encoders to overwrite them. */
5785 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
5786 CRTC_STEREO_DOUBLE);
5788 /* Pass our mode to the connectors and the CRTC to give them a chance to
5789 * adjust it according to limitations or connector properties, and also
5790 * a chance to reject the mode entirely.
5792 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5793 struct intel_encoder *encoder =
5794 to_intel_encoder(connector_state->best_encoder);
5796 if (connector_state->crtc != crtc)
5799 ret = encoder->compute_config(encoder, pipe_config,
/* -EDEADLK must be propagated untouched for atomic backoff */
5801 if (ret == -EDEADLK)
5804 drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
5809 /* Set default port clock if not overwritten by the encoder. Needs to be
5810 * done afterwards in case the encoder adjusts the mode. */
5811 if (!pipe_config->port_clock)
5812 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
5813 * pipe_config->pixel_multiplier;
5815 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
5816 if (ret == -EDEADLK)
/* -EAGAIN means bandwidth constrained: retry once from encoder_retry */
5818 if (ret == -EAGAIN) {
5819 if (drm_WARN(&i915->drm, !retry,
5820 "loop in pipe configuration computation\n"))
5823 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
5828 drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
5832 /* Dithering seems to not pass-through bits correctly when it should, so
5833 * only enable it on 6bpc panels and when its not a compliance
5834 * test requesting 6bpc video pattern.
5836 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
5837 !pipe_config->dither_force_disable;
5838 drm_dbg_kms(&i915->drm,
5839 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
5840 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/*
 * Late pipe-config pass, run after all crtcs have been computed: adjust the
 * pipe source rect for bigjoiner, then give each encoder on this crtc a
 * second chance to fix up the config via .compute_config_late() (used e.g.
 * for port sync / MST master-slave dependencies).
 */
5846 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
5848 struct intel_atomic_state *state =
5849 to_intel_atomic_state(crtc_state->uapi.state);
5850 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5851 struct drm_connector_state *conn_state;
5852 struct drm_connector *connector;
5855 intel_bigjoiner_adjust_pipe_src(crtc_state);
5857 for_each_new_connector_in_state(&state->base, connector,
5859 struct intel_encoder *encoder =
5860 to_intel_encoder(conn_state->best_encoder);
/* only encoders on this crtc that implement the late hook */
5863 if (conn_state->crtc != &crtc->base ||
5864 !encoder->compute_config_late)
5867 ret = encoder->compute_config_late(encoder, crtc_state,
/*
 * Fuzzy clock comparison: two clocks (kHz) are considered equal when their
 * difference is below ~5% of their sum.  A zero (unset) clock only matches
 * an exactly equal value.
 *
 * NOTE(review): the extracted text had the return statements dropped; this
 * body restores them to match the visible condition structure (matches the
 * upstream i915 implementation).
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	/*
	 * (diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105
	 *   <=>  diff < 5% of (clock1 + clock2)
	 * Integer division makes the bound very slightly loose.
	 */
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}
5895 intel_compare_m_n(unsigned int m, unsigned int n,
5896 unsigned int m2, unsigned int n2,
5899 if (m == m2 && n == n2)
5902 if (exact || !m || !n || !m2 || !n2)
5905 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
5912 } else if (n < n2) {
5922 return intel_fuzzy_clock_check(m, m2);
/*
 * Compare two link M/N structs: the TU must match exactly, while the
 * data and link M/N pairs are compared via intel_compare_m_n() (fuzzy
 * ratio comparison unless 'exact' is set).
 *
 * NOTE(review): the tail of the parameter list (the 'bool exact' line and
 * the opening brace) was dropped by the extraction — verify upstream.
 */
5926 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
5927 const struct intel_link_m_n *m2_n2,
5930 return m_n->tu == m2_n2->tu &&
5931 intel_compare_m_n(m_n->data_m, m_n->data_n,
5932 m2_n2->data_m, m2_n2->data_n, exact) &&
5933 intel_compare_m_n(m_n->link_m, m_n->link_n,
5934 m2_n2->link_m, m2_n2->link_n, exact);
/* Byte-wise equality of two HDMI infoframes. */
5938 intel_compare_infoframe(const union hdmi_infoframe *a,
5939 const union hdmi_infoframe *b)
5941 return memcmp(a, b, sizeof(*a)) == 0;
/* Byte-wise equality of two DP VSC SDPs. */
5945 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
5946 const struct drm_dp_vsc_sdp *b)
5948 return memcmp(a, b, sizeof(*a)) == 0;
/*
 * Report an infoframe mismatch: for a fastset check it is only a debug
 * message (and skipped entirely if KMS debugging is off), otherwise it is
 * an error.  Both branches dump the expected and found frames.
 *
 * NOTE(review): the if (fastset) / else lines were dropped by the
 * extraction; the two log blocks below are the two branches.
 */
5952 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
5953 bool fastset, const char *name,
5954 const union hdmi_infoframe *a,
5955 const union hdmi_infoframe *b)
5958 if (!drm_debug_enabled(DRM_UT_KMS))
5961 drm_dbg_kms(&dev_priv->drm,
5962 "fastset mismatch in %s infoframe\n", name);
5963 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5964 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
5965 drm_dbg_kms(&dev_priv->drm, "found:\n");
5966 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
5968 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
5969 drm_err(&dev_priv->drm, "expected:\n");
5970 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
5971 drm_err(&dev_priv->drm, "found:\n");
5972 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * DP VSC SDP analogue of pipe_config_infoframe_mismatch(): debug-level
 * dump for fastset checks, error-level dump otherwise.
 *
 * NOTE(review): as above, the if (fastset) / else lines were dropped by
 * the extraction.
 */
5977 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
5978 bool fastset, const char *name,
5979 const struct drm_dp_vsc_sdp *a,
5980 const struct drm_dp_vsc_sdp *b)
5983 if (!drm_debug_enabled(DRM_UT_KMS))
5986 drm_dbg_kms(&dev_priv->drm,
5987 "fastset mismatch in %s dp sdp\n", name);
5988 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5989 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
5990 drm_dbg_kms(&dev_priv->drm, "found:\n");
5991 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
5993 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
5994 drm_err(&dev_priv->drm, "expected:\n");
5995 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
5996 drm_err(&dev_priv->drm, "found:\n");
5997 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * printf-style mismatch reporter used by the PIPE_CONF_CHECK_* macros:
 * debug level for fastset mismatches, error level for full verification.
 * The __printf(4, 5) attribute lets the compiler check the format string.
 */
6001 static void __printf(4, 5)
6002 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
6003 const char *name, const char *format, ...)
6005 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6006 struct va_format vaf;
6009 va_start(args, format);
/* %pV prints the deferred va_format (kernel-specific format extension) */
6014 drm_dbg_kms(&i915->drm,
6015 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
6016 crtc->base.base.id, crtc->base.name, name, &vaf);
6018 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
6019 crtc->base.base.id, crtc->base.name, name, &vaf);
/*
 * Whether fastboot (taking over the firmware-programmed mode without a full
 * modeset) is allowed: the i915.fastboot module parameter wins when set
 * (!= -1); otherwise it defaults to on for display ver >= 9 and VLV/CHV,
 * off everywhere else.
 *
 * NOTE(review): the 'return true;' / 'return false;' lines of the three
 * default branches were dropped by the extraction.
 */
6024 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
6026 if (dev_priv->params.fastboot != -1)
6027 return dev_priv->params.fastboot;
6029 /* Enable fastboot by default on Skylake and newer */
6030 if (DISPLAY_VER(dev_priv) >= 9)
6033 /* Enable fastboot by default on VLV and CHV */
6034 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6037 /* Disabled by default on all others */
/*
 * Compare the sw-computed crtc state (current_config) against the state
 * read back from hardware (pipe_config).  Used both for full state
 * verification after a modeset and for deciding whether a fastset is
 * possible.  Mismatches are reported via pipe_config_mismatch() and its
 * infoframe/SDP helpers; the PIPE_CONF_CHECK_* macros below drive all the
 * individual field comparisons.
 *
 * NOTE(review): this extraction is damaged — the trailing "} \ } while (0)"
 * lines of the check macros were dropped, several declarations (e.g. the
 * 'ret'/'bp_gamma' locals) are missing, and '&current_config' has been
 * mangled into '¤t_config' in a few macro bodies (HTML-entity
 * corruption of '&curren;').  All of these must be restored from upstream
 * before this compiles.
 */
6042 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
6043 const struct intel_crtc_state *pipe_config,
6046 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
6047 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
/* only fix up inherited (BIOS-programmed) state during a fastset check */
6050 bool fixup_inherited = fastset &&
6051 current_config->inherited && !pipe_config->inherited;
6053 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
6054 drm_dbg_kms(&dev_priv->drm,
6055 "initial modeset and fastboot not set\n");
/* compare a field, report as hex */
6059 #define PIPE_CONF_CHECK_X(name) do { \
6060 if (current_config->name != pipe_config->name) { \
6061 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6062 "(expected 0x%08x, found 0x%08x)", \
6063 current_config->name, \
6064 pipe_config->name); \
/* compare only the bits under 'mask', report as hex */
6069 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
6070 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
6071 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6072 "(expected 0x%08x, found 0x%08x)", \
6073 current_config->name & (mask), \
6074 pipe_config->name & (mask)); \
/* compare a field, report as signed decimal */
6079 #define PIPE_CONF_CHECK_I(name) do { \
6080 if (current_config->name != pipe_config->name) { \
6081 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6082 "(expected %i, found %i)", \
6083 current_config->name, \
6084 pipe_config->name); \
/* compare a bool, report as yes/no */
6089 #define PIPE_CONF_CHECK_BOOL(name) do { \
6090 if (current_config->name != pipe_config->name) { \
6091 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6092 "(expected %s, found %s)", \
6093 str_yes_no(current_config->name), \
6094 str_yes_no(pipe_config->name)); \
6100 * Checks state where we only read out the enabling, but not the entire
6101 * state itself (like full infoframes or ELD for audio). These states
6102 * require a full modeset on bootup to fix up.
6104 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6105 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6106 PIPE_CONF_CHECK_BOOL(name); \
6108 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6109 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6110 str_yes_no(current_config->name), \
6111 str_yes_no(pipe_config->name)); \
/* compare a pointer (e.g. the shared DPLL) */
6116 #define PIPE_CONF_CHECK_P(name) do { \
6117 if (current_config->name != pipe_config->name) { \
6118 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6119 "(expected %p, found %p)", \
6120 current_config->name, \
6121 pipe_config->name); \
/* compare a link M/N struct (NOTE(review): '¤t_config' below is the
 * mangled '&current_config' mentioned in the header comment) */
6126 #define PIPE_CONF_CHECK_M_N(name) do { \
6127 if (!intel_compare_link_m_n(&current_config->name, \
6128 &pipe_config->name,\
6130 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6131 "(expected tu %i data %i/%i link %i/%i, " \
6132 "found tu %i, data %i/%i link %i/%i)", \
6133 current_config->name.tu, \
6134 current_config->name.data_m, \
6135 current_config->name.data_n, \
6136 current_config->name.link_m, \
6137 current_config->name.link_n, \
6138 pipe_config->name.tu, \
6139 pipe_config->name.data_m, \
6140 pipe_config->name.data_n, \
6141 pipe_config->name.link_m, \
6142 pipe_config->name.link_n); \
6147 /* This is required for BDW+ where there is only one set of registers for
6148 * switching between high and low RR.
6149 * This macro can be used whenever a comparison has to be made between one
6150 * hw state and multiple sw state variables.
6152 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6153 if (!intel_compare_link_m_n(&current_config->name, \
6154 &pipe_config->name, !fastset) && \
6155 !intel_compare_link_m_n(&current_config->alt_name, \
6156 &pipe_config->name, !fastset)) { \
6157 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6158 "(expected tu %i data %i/%i link %i/%i, " \
6159 "or tu %i data %i/%i link %i/%i, " \
6160 "found tu %i, data %i/%i link %i/%i)", \
6161 current_config->name.tu, \
6162 current_config->name.data_m, \
6163 current_config->name.data_n, \
6164 current_config->name.link_m, \
6165 current_config->name.link_n, \
6166 current_config->alt_name.tu, \
6167 current_config->alt_name.data_m, \
6168 current_config->alt_name.data_n, \
6169 current_config->alt_name.link_m, \
6170 current_config->alt_name.link_n, \
6171 pipe_config->name.tu, \
6172 pipe_config->name.data_m, \
6173 pipe_config->name.data_n, \
6174 pipe_config->name.link_m, \
6175 pipe_config->name.link_n); \
/* compare only the flag bits under 'mask' */
6180 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6181 if ((current_config->name ^ pipe_config->name) & (mask)) { \
6182 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6183 "(%x) (expected %i, found %i)", \
6185 current_config->name & (mask), \
6186 pipe_config->name & (mask)); \
/* clock fields get the ~5% fuzzy comparison */
6191 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6192 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6193 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6194 "(expected %i, found %i)", \
6195 current_config->name, \
6196 pipe_config->name); \
6201 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6202 if (!intel_compare_infoframe(&current_config->infoframes.name, \
6203 &pipe_config->infoframes.name)) { \
6204 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6205 &current_config->infoframes.name, \
6206 &pipe_config->infoframes.name); \
/* VSC SDP is owned by PSR hardware when PSR is enabled, so skip then */
6211 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
6212 if (!current_config->has_psr && !pipe_config->has_psr && \
6213 !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
6214 &pipe_config->infoframes.name)) { \
6215 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
6216 &current_config->infoframes.name, \
6217 &pipe_config->infoframes.name); \
/* compare gamma mode then the LUT contents at the given bit precision */
6222 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
6223 if (current_config->name1 != pipe_config->name1) { \
6224 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
6225 "(expected %i, found %i, won't compare lut values)", \
6226 current_config->name1, \
6227 pipe_config->name1); \
6230 if (!intel_color_lut_equal(current_config->name2, \
6231 pipe_config->name2, pipe_config->name1, \
6233 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
6234 "hw_state doesn't match sw_state"); \
/* quirk active if set in either the sw or the hw state */
6240 #define PIPE_CONF_QUIRK(quirk) \
6241 ((current_config->quirks | pipe_config->quirks) & (quirk))
6243 PIPE_CONF_CHECK_I(cpu_transcoder);
6245 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6246 PIPE_CONF_CHECK_I(fdi_lanes);
6247 PIPE_CONF_CHECK_M_N(fdi_m_n);
6249 PIPE_CONF_CHECK_I(lane_count);
6250 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
/* BDW+ has a single M/N register set shared between high/low refresh rate */
6252 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) {
6253 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
6255 PIPE_CONF_CHECK_M_N(dp_m_n);
6256 PIPE_CONF_CHECK_M_N(dp_m2_n2);
6259 PIPE_CONF_CHECK_X(output_types);
6261 PIPE_CONF_CHECK_I(framestart_delay);
6262 PIPE_CONF_CHECK_I(msa_timing_delay);
/* per-pipe timings (differ from adjusted_mode with bigjoiner) */
6264 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
6265 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
6266 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
6267 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
6268 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
6269 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
6271 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
6272 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
6273 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
6274 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
6275 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
6276 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
6278 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
6279 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
6280 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
6281 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
6282 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
6283 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
6285 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
6286 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
6287 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
6288 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
6289 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
6290 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
6292 PIPE_CONF_CHECK_I(pixel_multiplier);
6294 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6295 DRM_MODE_FLAG_INTERLACE);
6297 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6298 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6299 DRM_MODE_FLAG_PHSYNC);
6300 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6301 DRM_MODE_FLAG_NHSYNC);
6302 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6303 DRM_MODE_FLAG_PVSYNC);
6304 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6305 DRM_MODE_FLAG_NVSYNC);
6308 PIPE_CONF_CHECK_I(output_format);
6309 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
6310 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
6311 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6312 PIPE_CONF_CHECK_BOOL(limited_color_range);
6314 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
6315 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
6316 PIPE_CONF_CHECK_BOOL(has_infoframe);
6317 PIPE_CONF_CHECK_BOOL(fec_enable);
6319 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
6321 PIPE_CONF_CHECK_X(gmch_pfit.control);
6322 /* pfit ratios are autocomputed by the hw on gen4+ */
6323 if (DISPLAY_VER(dev_priv) < 4)
6324 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
6325 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
6328 * Changing the EDP transcoder input mux
6329 * (A_ONOFF vs. A_ON) requires a full modeset.
6331 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
6334 PIPE_CONF_CHECK_I(pipe_src.x1);
6335 PIPE_CONF_CHECK_I(pipe_src.y1);
6336 PIPE_CONF_CHECK_I(pipe_src.x2);
6337 PIPE_CONF_CHECK_I(pipe_src.y2);
6339 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
6340 if (current_config->pch_pfit.enabled) {
6341 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
6342 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
6343 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
6344 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
6347 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
6348 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
6350 PIPE_CONF_CHECK_X(gamma_mode);
6351 if (IS_CHERRYVIEW(dev_priv))
6352 PIPE_CONF_CHECK_X(cgm_mode);
6354 PIPE_CONF_CHECK_X(csc_mode);
6355 PIPE_CONF_CHECK_BOOL(gamma_enable);
6356 PIPE_CONF_CHECK_BOOL(csc_enable);
6358 PIPE_CONF_CHECK_I(linetime);
6359 PIPE_CONF_CHECK_I(ips_linetime);
6361 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
6363 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
/* PSR state is only meaningful while planes are active */
6365 if (current_config->active_planes) {
6366 PIPE_CONF_CHECK_BOOL(has_psr);
6367 PIPE_CONF_CHECK_BOOL(has_psr2);
6368 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
6369 PIPE_CONF_CHECK_I(dc3co_exitline);
6373 PIPE_CONF_CHECK_BOOL(double_wide);
6375 if (dev_priv->dpll.mgr) {
6376 PIPE_CONF_CHECK_P(shared_dpll);
/* raw DPLL register values across all platform generations */
6378 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
6379 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
6380 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
6381 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
6382 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6383 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
6384 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
6385 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
6386 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
6387 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
6388 PIPE_CONF_CHECK_X(dpll_hw_state.div0);
6389 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
6390 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
6391 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
6392 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
6393 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
6394 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
6395 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
6396 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
6397 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
6398 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
6399 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
6400 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
6401 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
6402 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
6403 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
6404 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
6405 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
6406 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
6407 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
6408 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
6409 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
6412 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
6413 PIPE_CONF_CHECK_X(dsi_pll.div);
6415 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
6416 PIPE_CONF_CHECK_I(pipe_bpp);
6418 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
6419 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
6420 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
6422 PIPE_CONF_CHECK_I(min_voltage_level);
/* with PSR the VSC enable bit is owned by hardware — mask it out */
6424 if (current_config->has_psr || pipe_config->has_psr)
6425 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
6426 ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
6428 PIPE_CONF_CHECK_X(infoframes.enable);
6430 PIPE_CONF_CHECK_X(infoframes.gcp);
6431 PIPE_CONF_CHECK_INFOFRAME(avi);
6432 PIPE_CONF_CHECK_INFOFRAME(spd);
6433 PIPE_CONF_CHECK_INFOFRAME(hdmi);
6434 PIPE_CONF_CHECK_INFOFRAME(drm);
6435 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
6437 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
6438 PIPE_CONF_CHECK_I(master_transcoder);
6439 PIPE_CONF_CHECK_X(bigjoiner_pipes);
6441 PIPE_CONF_CHECK_I(dsc.compression_enable);
6442 PIPE_CONF_CHECK_I(dsc.dsc_split);
6443 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
6445 PIPE_CONF_CHECK_BOOL(splitter.enable);
6446 PIPE_CONF_CHECK_I(splitter.link_count);
6447 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
6449 PIPE_CONF_CHECK_I(mst_master_transcoder);
6451 PIPE_CONF_CHECK_BOOL(vrr.enable);
6452 PIPE_CONF_CHECK_I(vrr.vmin);
6453 PIPE_CONF_CHECK_I(vrr.vmax);
6454 PIPE_CONF_CHECK_I(vrr.flipline);
6455 PIPE_CONF_CHECK_I(vrr.pipeline_full);
6456 PIPE_CONF_CHECK_I(vrr.guardband);
/* the check macros are local to this function */
6458 #undef PIPE_CONF_CHECK_X
6459 #undef PIPE_CONF_CHECK_I
6460 #undef PIPE_CONF_CHECK_BOOL
6461 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
6462 #undef PIPE_CONF_CHECK_P
6463 #undef PIPE_CONF_CHECK_FLAGS
6464 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
6465 #undef PIPE_CONF_CHECK_COLOR_LUT
6466 #undef PIPE_CONF_QUIRK
/*
 * Cross-check the FDI-derived dotclock against the dotclock the encoder
 * computed into adjusted_mode; the two should agree to within the fuzzy
 * clock tolerance on PCH-encoder configs.
 */
6471 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
6472 const struct intel_crtc_state *pipe_config)
6474 if (pipe_config->has_pch_encoder) {
6475 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6476 &pipe_config->fdi_m_n);
6477 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
6480 * FDI already provided one idea for the dotclock.
6481 * Yell if the encoder disagrees.
6483 drm_WARN(&dev_priv->drm,
6484 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
6485 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6486 fdi_dotclock, dotclock);
/*
 * SKL+ watermark verification: read the WM levels, transition WMs, SAGV WMs
 * and DDB allocations back from hardware and compare them per-plane against
 * the sw-computed state, logging any mismatch.  No-op on pre-SKL or on an
 * inactive crtc.
 *
 * NOTE(review): the declaration of the 'hw' pointer (struct skl_hw_state *hw)
 * and several braces were dropped by the extraction — verify upstream.
 */
6490 static void verify_wm_state(struct intel_crtc *crtc,
6491 struct intel_crtc_state *new_crtc_state)
6493 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6494 struct skl_hw_state {
6495 struct skl_ddb_entry ddb[I915_MAX_PLANES];
6496 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
6497 struct skl_pipe_wm wm;
6499 const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
6500 int level, max_level = ilk_wm_max_level(dev_priv);
6501 struct intel_plane *plane;
6502 u8 hw_enabled_slices;
6504 if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
/* heap-allocate: skl_hw_state is too large for the stack */
6507 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
6511 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
6513 skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
6515 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
6517 if (DISPLAY_VER(dev_priv) >= 11 &&
6518 hw_enabled_slices != dev_priv->dbuf.enabled_slices)
6519 drm_err(&dev_priv->drm,
6520 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
6521 dev_priv->dbuf.enabled_slices,
6524 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6525 const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
6526 const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* regular watermark levels 0..max_level */
6529 for (level = 0; level <= max_level; level++) {
6530 hw_wm_level = &hw->wm.planes[plane->id].wm[level];
6531 sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
6533 if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
6536 drm_err(&dev_priv->drm,
6537 "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6538 plane->base.base.id, plane->base.name, level,
6539 sw_wm_level->enable,
6540 sw_wm_level->blocks,
6542 hw_wm_level->enable,
6543 hw_wm_level->blocks,
6544 hw_wm_level->lines);
/* transition watermark */
6547 hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
6548 sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
6550 if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6551 drm_err(&dev_priv->drm,
6552 "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6553 plane->base.base.id, plane->base.name,
6554 sw_wm_level->enable,
6555 sw_wm_level->blocks,
6557 hw_wm_level->enable,
6558 hw_wm_level->blocks,
6559 hw_wm_level->lines);
/* SAGV WM0, only on platforms with dedicated SAGV watermark registers */
6562 hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
6563 sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
6565 if (HAS_HW_SAGV_WM(dev_priv) &&
6566 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6567 drm_err(&dev_priv->drm,
6568 "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6569 plane->base.base.id, plane->base.name,
6570 sw_wm_level->enable,
6571 sw_wm_level->blocks,
6573 hw_wm_level->enable,
6574 hw_wm_level->blocks,
6575 hw_wm_level->lines);
/* SAGV transition watermark */
6578 hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
6579 sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
6581 if (HAS_HW_SAGV_WM(dev_priv) &&
6582 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6583 drm_err(&dev_priv->drm,
6584 "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6585 plane->base.base.id, plane->base.name,
6586 sw_wm_level->enable,
6587 sw_wm_level->blocks,
6589 hw_wm_level->enable,
6590 hw_wm_level->blocks,
6591 hw_wm_level->lines);
/* DDB allocation (NOTE(review): only PLANE_CURSOR is indexed here —
 * verify against upstream whether the per-plane entry was intended) */
6595 hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
6596 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
6598 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
6599 drm_err(&dev_priv->drm,
6600 "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
6601 plane->base.base.id, plane->base.name,
6602 sw_ddb_entry->start, sw_ddb_entry->end,
6603 hw_ddb_entry->start, hw_ddb_entry->end);
/*
 * Verify every connector attached to this crtc: its intel state must be
 * consistent (intel_connector_verify_state) and its atomic best_encoder
 * must match the legacy connector->encoder pointer.
 */
6611 verify_connector_state(struct intel_atomic_state *state,
6612 struct intel_crtc *crtc)
6614 struct drm_connector *connector;
6615 struct drm_connector_state *new_conn_state;
6618 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
6619 struct drm_encoder *encoder = connector->encoder;
6620 struct intel_crtc_state *crtc_state = NULL;
6622 if (new_conn_state->crtc != &crtc->base)
6626 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6628 intel_connector_verify_state(crtc_state, new_conn_state);
6630 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
6631 "connector's atomic encoder doesn't match legacy encoder\n");
/*
 * Verify encoder bookkeeping after a commit: walk every encoder and
 * cross-check which connectors reference it in the old vs. new state,
 * warn if the encoder's enabled state (base.crtc != NULL) disagrees
 * with the connector linkage, and for detached encoders confirm the
 * hardware reports them disabled via ->get_hw_state().
 */
6636 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
6638 struct intel_encoder *encoder;
6639 struct drm_connector *connector;
6640 struct drm_connector_state *old_conn_state, *new_conn_state;
6643 for_each_intel_encoder(&dev_priv->drm, encoder) {
6644 bool enabled = false, found = false;
6647 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
6648 encoder->base.base.id,
6649 encoder->base.name);
6651 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
6652 new_conn_state, i) {
6653 if (old_conn_state->best_encoder == &encoder->base)
6656 if (new_conn_state->best_encoder != &encoder->base)
6658 found = enabled = true;
6660 I915_STATE_WARN(new_conn_state->crtc !=
6662 "connector's crtc doesn't match encoder crtc\n");
6668 I915_STATE_WARN(!!encoder->base.crtc != enabled,
6669 "encoder's enabled state mismatch "
6670 "(expected %i, found %i)\n",
6671 !!encoder->base.crtc, enabled);
6673 if (!encoder->base.crtc) {
/* encoder unbound: the hw must report it inactive */
6676 active = encoder->get_hw_state(encoder, &pipe);
6677 I915_STATE_WARN(active,
6678 "encoder detached but still enabled on pipe %c.\n",
/*
 * Verify a single crtc after a commit: re-read the full pipe config
 * from the hardware into @old_crtc_state (reused as scratch space —
 * its previous contents are destroyed first) and compare it against
 * the committed sw state with intel_pipe_config_compare().
 * Also cross-checks crtc/encoder active states against hw readout.
 */
6685 verify_crtc_state(struct intel_crtc *crtc,
6686 struct intel_crtc_state *old_crtc_state,
6687 struct intel_crtc_state *new_crtc_state)
6689 struct drm_device *dev = crtc->base.dev;
6690 struct drm_i915_private *dev_priv = to_i915(dev);
6691 struct intel_encoder *encoder;
6692 struct intel_crtc_state *pipe_config = old_crtc_state;
6693 struct drm_atomic_state *state = old_crtc_state->uapi.state;
6694 struct intel_crtc *master_crtc;
/* repurpose old_crtc_state as the hw-readout buffer */
6696 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
6697 intel_crtc_free_hw_state(old_crtc_state);
6698 intel_crtc_state_reset(old_crtc_state, crtc);
6699 old_crtc_state->uapi.state = state;
6701 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
6704 pipe_config->hw.enable = new_crtc_state->hw.enable;
6706 intel_crtc_get_pipe_config(pipe_config);
6708 /* we keep both pipes enabled on 830 */
6709 if (IS_I830(dev_priv) && pipe_config->hw.active)
6710 pipe_config->hw.active = new_crtc_state->hw.active;
6712 I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
6713 "crtc active state doesn't match with hw state "
6714 "(expected %i, found %i)\n",
6715 new_crtc_state->hw.active, pipe_config->hw.active);
6717 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
6718 "transitional active state does not match atomic hw state "
6719 "(expected %i, found %i)\n",
6720 new_crtc_state->hw.active, crtc->active);
/* encoders hang off the bigjoiner master crtc */
6722 master_crtc = intel_master_crtc(new_crtc_state);
6724 for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
6728 active = encoder->get_hw_state(encoder, &pipe);
6729 I915_STATE_WARN(active != new_crtc_state->hw.active,
6730 "[ENCODER:%i] active %i with crtc active %i\n",
6731 encoder->base.base.id, active,
6732 new_crtc_state->hw.active);
6734 I915_STATE_WARN(active && master_crtc->pipe != pipe,
6735 "Encoder connected to wrong pipe %c\n",
6739 intel_encoder_get_config(encoder, pipe_config);
6742 if (!new_crtc_state->hw.active)
6745 intel_pipe_config_sanity_check(dev_priv, pipe_config);
6747 if (!intel_pipe_config_compare(new_crtc_state,
6748 pipe_config, false)) {
6749 I915_STATE_WARN(1, "pipe state doesn't match!\n");
6750 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
6751 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
/*
 * Assert that every plane in @state that is not a planar (NV12) slave
 * agrees with its uapi visibility — catches sw plane-state tracking
 * going out of sync with what was committed.
 */
6756 intel_verify_planes(struct intel_atomic_state *state)
6758 struct intel_plane *plane;
6759 const struct intel_plane_state *plane_state;
6762 for_each_new_intel_plane_in_state(state, plane,
6764 assert_plane(plane, plane_state->planar_slave ||
6765 plane_state->uapi.visible);
/*
 * Verify one shared DPLL against hardware: read back the hw state and
 * cross-check the sw refcounting (on/active_mask/pipe_mask) and, when
 * @crtc is given, that this pipe's membership in the masks matches
 * whether the new crtc state is active. Finally memcmp the cached sw
 * hw_state against the freshly read dpll_hw_state.
 */
6769 verify_single_dpll_state(struct drm_i915_private *dev_priv,
6770 struct intel_shared_dpll *pll,
6771 struct intel_crtc *crtc,
6772 struct intel_crtc_state *new_crtc_state)
6774 struct intel_dpll_hw_state dpll_hw_state;
6778 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
6780 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
6782 active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
/* always-on PLLs are exempt from the on/active consistency checks */
6784 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
6785 I915_STATE_WARN(!pll->on && pll->active_mask,
6786 "pll in active use but not on in sw tracking\n");
6787 I915_STATE_WARN(pll->on && !pll->active_mask,
6788 "pll is on but not used by any active pipe\n");
6789 I915_STATE_WARN(pll->on != active,
6790 "pll on state mismatch (expected %i, found %i)\n",
6795 I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
6796 "more active pll users than references: 0x%x vs 0x%x\n",
6797 pll->active_mask, pll->state.pipe_mask);
/* per-crtc checks only when a crtc was supplied */
6802 pipe_mask = BIT(crtc->pipe);
6804 if (new_crtc_state->hw.active)
6805 I915_STATE_WARN(!(pll->active_mask & pipe_mask),
6806 "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
6807 pipe_name(crtc->pipe), pll->active_mask);
6809 I915_STATE_WARN(pll->active_mask & pipe_mask,
6810 "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
6811 pipe_name(crtc->pipe), pll->active_mask);
6813 I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
6814 "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
6815 pipe_mask, pll->state.pipe_mask);
6817 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
6819 sizeof(dpll_hw_state)),
6820 "pll hw state mismatch\n");
/*
 * Verify shared-DPLL tracking for one crtc: check the PLL referenced
 * by the new state, and when the crtc switched PLLs during this
 * commit, additionally assert the old PLL no longer references this
 * pipe in either its active or enabled mask.
 */
6824 verify_shared_dpll_state(struct intel_crtc *crtc,
6825 struct intel_crtc_state *old_crtc_state,
6826 struct intel_crtc_state *new_crtc_state)
6828 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6830 if (new_crtc_state->shared_dpll)
6831 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
6833 if (old_crtc_state->shared_dpll &&
6834 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
6835 u8 pipe_mask = BIT(crtc->pipe);
6836 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
6838 I915_STATE_WARN(pll->active_mask & pipe_mask,
6839 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
6840 pipe_name(crtc->pipe), pll->active_mask);
/* NOTE(review): %x is fed pipe_name() (a char) — prints the pipe
 * letter's character code rather than the pipe letter; looks like a
 * long-standing format quirk, verify before relying on the output. */
6841 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
6842 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
6843 pipe_name(crtc->pipe), pll->state.pipe_mask);
/*
 * Verify the SNPS MPLLB (DG2) PHY PLL: read back the hw register set
 * via the crtc's encoder and field-by-field compare it against the sw
 * state, reporting each mismatching register through
 * pipe_config_mismatch(). Skipped for inactive crtcs.
 */
6848 verify_mpllb_state(struct intel_atomic_state *state,
6849 struct intel_crtc_state *new_crtc_state)
6851 struct drm_i915_private *i915 = to_i915(state->base.dev);
6852 struct intel_mpllb_state mpllb_hw_state = { 0 };
6853 struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
6854 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6855 struct intel_encoder *encoder;
6860 if (!new_crtc_state->hw.active)
6863 encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
6864 intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
6866 #define MPLLB_CHECK(name) do { \
6867 if (mpllb_sw_state->name != mpllb_hw_state.name) { \
6868 pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
6869 "(expected 0x%08x, found 0x%08x)", \
6870 mpllb_sw_state->name, \
6871 mpllb_hw_state.name); \
6875 MPLLB_CHECK(mpllb_cp);
6876 MPLLB_CHECK(mpllb_div);
6877 MPLLB_CHECK(mpllb_div2);
6878 MPLLB_CHECK(mpllb_fracn1);
6879 MPLLB_CHECK(mpllb_fracn2);
6880 MPLLB_CHECK(mpllb_sscen);
6881 MPLLB_CHECK(mpllb_sscstep);
6884 * ref_control is handled by the hardware/firmware and never
6885 * programmed by the software, but the proper values are supplied
6886 * in the bspec for verification purposes.
6888 MPLLB_CHECK(ref_control);
/*
 * Run the full post-commit verification suite for one crtc (WM state,
 * connector state, pipe config, shared DPLLs, MPLLB). Only runs when
 * the crtc underwent a full modeset or a fastset pipe update.
 */
6894 intel_modeset_verify_crtc(struct intel_crtc *crtc,
6895 struct intel_atomic_state *state,
6896 struct intel_crtc_state *old_crtc_state,
6897 struct intel_crtc_state *new_crtc_state)
6899 if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
6902 verify_wm_state(crtc, new_crtc_state);
6903 verify_connector_state(state, crtc);
6904 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
6905 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
6906 verify_mpllb_state(state, new_crtc_state);
/*
 * Verify every shared DPLL without reference to any crtc — used in the
 * "everything disabled" verification pass.
 */
6910 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
6914 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
6915 verify_single_dpll_state(dev_priv,
6916 &dev_priv->dpll.shared_dplls[i],
/*
 * Verify global state that should hold when pipes are disabled:
 * encoders, connectors not bound to any crtc (crtc == NULL), and all
 * shared DPLLs.
 */
6921 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
6922 struct intel_atomic_state *state)
6924 verify_encoder_state(dev_priv, state);
6925 verify_connector_state(state, NULL);
6926 verify_disabled_dpll_state(dev_priv);
/*
 * Pull every crtc into @state and force a full modeset on each one
 * that is active and not already flagged for modeset, together with
 * its connectors and planes. Returns 0 or a negative error code from
 * the atomic helpers.
 */
6929 int intel_modeset_all_pipes(struct intel_atomic_state *state)
6931 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6932 struct intel_crtc *crtc;
6935 * Add all pipes to the state, and force
6936 * a modeset on all the active ones.
6938 for_each_intel_crtc(&dev_priv->drm, crtc) {
6939 struct intel_crtc_state *crtc_state;
6942 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6943 if (IS_ERR(crtc_state))
6944 return PTR_ERR(crtc_state);
/* skip inactive crtcs and those already doing a modeset */
6946 if (!crtc_state->hw.active ||
6947 drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
6950 crtc_state->uapi.mode_changed = true;
6952 ret = drm_atomic_add_affected_connectors(&state->base,
6957 ret = intel_atomic_add_affected_planes(state, crtc);
6961 crtc_state->update_planes |= crtc_state->active_planes;
/*
 * Update the crtc's vblank timestamping constants, mode flags and
 * scanline_offset from the committed crtc state. With VRR enabled the
 * timings are recomputed against vmax rather than the nominal vtotal.
 * The scanline_offset value compensates for where each platform's
 * scanline counter starts (see the long comment below).
 */
6968 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
6970 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6971 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6972 struct drm_display_mode adjusted_mode =
6973 crtc_state->hw.adjusted_mode;
6975 if (crtc_state->vrr.enable) {
6976 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
6977 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
6978 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
6979 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
6982 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
6984 crtc->mode_flags = crtc_state->mode_flags;
6987 * The scanline counter increments at the leading edge of hsync.
6989 * On most platforms it starts counting from vtotal-1 on the
6990 * first active line. That means the scanline counter value is
6991 * always one less than what we would expect. Ie. just after
6992 * start of vblank, which also occurs at start of hsync (on the
6993 * last active line), the scanline counter will read vblank_start-1.
6995 * On gen2 the scanline counter starts counting from 1 instead
6996 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
6997 * to keep the value positive), instead of adding one.
6999 * On HSW+ the behaviour of the scanline counter depends on the output
7000 * type. For DP ports it behaves like most other platforms, but on HDMI
7001 * there's an extra 1 line difference. So we need to add two instead of
7004 * On VLV/CHV DSI the scanline counter would appear to increment
7005 * approx. 1/3 of a scanline before start of vblank. Unfortunately
7006 * that means we can't tell whether we're in vblank or not while
7007 * we're on that particular line. We must still set scanline_offset
7008 * to 1 so that the vblank timestamps come out correct when we query
7009 * the scanline counter from within the vblank interrupt handler.
7010 * However if queried just before the start of vblank we'll get an
7011 * answer that's slightly in the future.
7013 if (DISPLAY_VER(dev_priv) == 2) {
7016 vtotal = adjusted_mode.crtc_vtotal;
7017 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7020 crtc->scanline_offset = vtotal - 1;
7021 } else if (HAS_DDI(dev_priv) &&
7022 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
7023 crtc->scanline_offset = 2;
7025 crtc->scanline_offset = 1;
/*
 * Release the shared DPLL references of every crtc in @state that is
 * doing a full modeset. No-op on platforms without shared-DPLL
 * management (dpll_funcs == NULL).
 */
7029 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
7031 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7032 struct intel_crtc_state *new_crtc_state;
7033 struct intel_crtc *crtc;
7036 if (!dev_priv->dpll_funcs)
7039 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7040 if (!intel_crtc_needs_modeset(new_crtc_state))
7043 intel_release_shared_dplls(state, crtc);
7048 * This implements the workaround described in the "notes" section of the mode
7049 * set sequence documentation. When going from no pipes or single pipe to
7050 * multiple pipes, and planes are enabled after the pipe, we need to wait at
7051 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
/*
 * Implement the HSW plane-enable workaround (see comment above): when
 * going from <=1 enabled pipe to multiple, record in
 * hsw_workaround_pipe which already-enabled pipe the newly enabled
 * pipe must wait 2 vblanks on before enabling its planes.
 * Returns 0 or a negative error from pulling crtc states into @state.
 */
7053 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
7055 struct intel_crtc_state *crtc_state;
7056 struct intel_crtc *crtc;
7057 struct intel_crtc_state *first_crtc_state = NULL;
7058 struct intel_crtc_state *other_crtc_state = NULL;
7059 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
7062 /* look at all crtc's that are going to be enabled in during modeset */
7063 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7064 if (!crtc_state->hw.active ||
7065 !intel_crtc_needs_modeset(crtc_state))
7068 if (first_crtc_state) {
7069 other_crtc_state = crtc_state;
7072 first_crtc_state = crtc_state;
7073 first_pipe = crtc->pipe;
7077 /* No workaround needed? */
7078 if (!first_crtc_state)
7081 /* w/a possibly needed, check how many crtc's are already enabled. */
7082 for_each_intel_crtc(state->base.dev, crtc) {
7083 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7084 if (IS_ERR(crtc_state))
7085 return PTR_ERR(crtc_state);
7087 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
7089 if (!crtc_state->hw.active ||
7090 intel_crtc_needs_modeset(crtc_state))
7093 /* 2 or more enabled crtcs means no need for w/a */
7094 if (enabled_pipe != INVALID_PIPE)
7097 enabled_pipe = crtc->pipe;
7100 if (enabled_pipe != INVALID_PIPE)
7101 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
7102 else if (other_crtc_state)
7103 other_crtc_state->hsw_workaround_pipe = first_pipe;
/*
 * Compute the resulting active-pipe bitmask after @state is applied:
 * start from the passed-in mask and set/clear the bit of each crtc
 * present in the state according to its new hw.active value.
 */
7108 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
7111 const struct intel_crtc_state *crtc_state;
7112 struct intel_crtc *crtc;
7115 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7116 if (crtc_state->hw.active)
7117 active_pipes |= BIT(crtc->pipe);
7119 active_pipes &= ~BIT(crtc->pipe);
7122 return active_pipes;
/*
 * Global checks run once per atomic commit that contains a modeset;
 * marks the state as a modeset and applies the HSW planes workaround.
 */
7125 static int intel_modeset_checks(struct intel_atomic_state *state)
7127 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7129 state->modeset = true;
7131 if (IS_HASWELL(dev_priv))
7132 return hsw_mode_set_planes_workaround(state);
/*
 * Downgrade a full modeset to a fastset when the old and new pipe
 * configs are equivalent under the fastset (fuzzy) comparison:
 * clear mode_changed and set update_pipe instead.
 */
7137 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
7138 struct intel_crtc_state *new_crtc_state)
7140 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
7143 new_crtc_state->uapi.mode_changed = false;
7144 new_crtc_state->update_pipe = true;
/*
 * For a fastset (no full modeset), carry selected "fuzzy" state over
 * from the old crtc state so the hardware keeps its currently
 * programmed values instead of slightly different recomputed ones.
 */
7147 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
7148 struct intel_crtc_state *new_crtc_state)
7151 * If we're not doing the full modeset we want to
7152 * keep the current M/N values as they may be
7153 * sufficiently different to the computed values
7154 * to cause problems.
7156 * FIXME: should really copy more fuzzy state here
7158 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
7159 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
7160 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
7161 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
/*
 * Pull the plane states for every plane of @crtc whose id is set in
 * the given mask into @state. Returns 0 or a negative error from
 * intel_atomic_get_plane_state().
 */
7164 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
7165 struct intel_crtc *crtc,
7168 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7169 struct intel_plane *plane;
7171 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7172 struct intel_plane_state *plane_state;
7174 if ((plane_ids_mask & BIT(plane->id)) == 0)
7177 plane_state = intel_atomic_get_plane_state(state, plane);
7178 if (IS_ERR(plane_state))
7179 return PTR_ERR(plane_state);
/*
 * Add to @state every plane that is enabled on @crtc in either the
 * old or the new crtc state (union of the two enabled_planes masks).
 */
7185 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
7186 struct intel_crtc *crtc)
7188 const struct intel_crtc_state *old_crtc_state =
7189 intel_atomic_get_old_crtc_state(state, crtc);
7190 const struct intel_crtc_state *new_crtc_state =
7191 intel_atomic_get_new_crtc_state(state, crtc);
7193 return intel_crtc_add_planes_to_state(state, crtc,
7194 old_crtc_state->enabled_planes |
7195 new_crtc_state->enabled_planes);
/*
 * Platforms where the number of active planes factors into the
 * per-plane minimum cdclk calculation (plane ratio rules).
 */
7198 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7200 /* See {hsw,vlv,ivb}_plane_ratio() */
7201 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7202 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7203 IS_IVYBRIDGE(dev_priv);
/*
 * Mirror @crtc's plane set onto the bigjoiner partner @other: collect
 * the ids of all planes in @state on @crtc's pipe and add the
 * same-id planes of @other to the state.
 */
7206 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
7207 struct intel_crtc *crtc,
7208 struct intel_crtc *other)
7210 const struct intel_plane_state *plane_state;
7211 struct intel_plane *plane;
7215 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7216 if (plane->pipe == crtc->pipe)
7217 plane_ids |= BIT(plane->id);
7220 return intel_crtc_add_planes_to_state(state, other, plane_ids);
/*
 * For every crtc in @state that participates in bigjoiner, add the
 * corresponding planes of each partner pipe so master and slave plane
 * state stay in sync.
 */
7223 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
7225 struct drm_i915_private *i915 = to_i915(state->base.dev);
7226 const struct intel_crtc_state *crtc_state;
7227 struct intel_crtc *crtc;
7230 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7231 struct intel_crtc *other;
7233 for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
7234 crtc_state->bigjoiner_pipes) {
7240 ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
/*
 * Plane-level atomic checks: link ICL planar (NV12) slave planes, add
 * bigjoiner partner planes, run each plane's atomic check, then on
 * platforms where active-plane count affects min cdclk, pull in the
 * full new plane set whenever the (non-cursor) active plane count
 * changed so cdclk can be recomputed.
 */
7249 static int intel_atomic_check_planes(struct intel_atomic_state *state)
7251 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7252 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7253 struct intel_plane_state *plane_state;
7254 struct intel_plane *plane;
7255 struct intel_crtc *crtc;
7258 ret = icl_add_linked_planes(state);
7262 ret = intel_bigjoiner_add_affected_planes(state);
7266 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7267 ret = intel_plane_atomic_check(state, plane);
7269 drm_dbg_atomic(&dev_priv->drm,
7270 "[PLANE:%d:%s] atomic driver check failed\n",
7271 plane->base.base.id, plane->base.name);
7276 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7277 new_crtc_state, i) {
7278 u8 old_active_planes, new_active_planes;
7280 ret = icl_check_nv12_planes(new_crtc_state);
7285 * On some platforms the number of active planes affects
7286 * the planes' minimum cdclk calculation. Add such planes
7287 * to the state before we compute the minimum cdclk.
7289 if (!active_planes_affects_min_cdclk(dev_priv))
/* cursor is excluded from the plane-ratio rules */
7292 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
7293 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
7295 if (hweight8(old_active_planes) == hweight8(new_active_planes))
7298 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
/*
 * Run the driver-specific atomic check for every crtc in @state,
 * logging and propagating the first failure.
 */
7306 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
7308 struct intel_crtc_state *crtc_state;
7309 struct intel_crtc *crtc;
7312 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7313 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7316 ret = intel_crtc_atomic_check(state, crtc);
7318 drm_dbg_atomic(&i915->drm,
7319 "[CRTC:%d:%s] atomic driver check failed\n",
7320 crtc->base.base.id, crtc->base.name);
/*
 * Return true if any enabled crtc in @state whose cpu transcoder is in
 * the given transcoder mask needs a full modeset.
 */
7328 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
7331 const struct intel_crtc_state *new_crtc_state;
7332 struct intel_crtc *crtc;
7335 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7336 if (new_crtc_state->hw.enable &&
7337 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
7338 intel_crtc_needs_modeset(new_crtc_state))
/*
 * Return true if any enabled crtc in @state whose pipe is in the given
 * pipe mask needs a full modeset.
 */
7345 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
7348 const struct intel_crtc_state *new_crtc_state;
7349 struct intel_crtc *crtc;
7352 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7353 if (new_crtc_state->hw.enable &&
7354 pipes & BIT(crtc->pipe) &&
7355 intel_crtc_needs_modeset(new_crtc_state))
/*
 * Validate and set up bigjoiner for @master_crtc: verify the requested
 * pipe set is possible, pull each slave crtc into @state, reject
 * slaves that userspace has enabled as normal crtcs, and copy the
 * master's modeset state onto each slave. Relies on master pipe <
 * slave pipe ordering (see comment below). Returns 0 or -EINVAL-style
 * errors (elided in this view).
 */
7362 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
7363 struct intel_crtc *master_crtc)
7365 struct drm_i915_private *i915 = to_i915(state->base.dev);
7366 struct intel_crtc_state *master_crtc_state =
7367 intel_atomic_get_new_crtc_state(state, master_crtc);
7368 struct intel_crtc *slave_crtc;
7370 if (!master_crtc_state->bigjoiner_pipes)
7374 if (drm_WARN_ON(&i915->drm,
7375 master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
7378 if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
7379 drm_dbg_kms(&i915->drm,
7380 "[CRTC:%d:%s] Cannot act as big joiner master "
7381 "(need 0x%x as pipes, only 0x%x possible)\n",
7382 master_crtc->base.base.id, master_crtc->base.name,
7383 master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
7387 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
7388 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
7389 struct intel_crtc_state *slave_crtc_state;
7392 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
7393 if (IS_ERR(slave_crtc_state))
7394 return PTR_ERR(slave_crtc_state);
7396 /* master being enabled, slave was already configured? */
7397 if (slave_crtc_state->uapi.enable) {
7398 drm_dbg_kms(&i915->drm,
7399 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
7400 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
7401 slave_crtc->base.base.id, slave_crtc->base.name,
7402 master_crtc->base.base.id, master_crtc->base.name);
7407 * The state copy logic assumes the master crtc gets processed
7408 * before the slave crtc during the main compute_config loop.
7409 * This works because the crtcs are created in pipe order,
7410 * and the hardware requires master pipe < slave pipe as well.
7411 * Should that change we need to rethink the logic.
7413 if (WARN_ON(drm_crtc_index(&master_crtc->base) >
7414 drm_crtc_index(&slave_crtc->base)))
7417 drm_dbg_kms(&i915->drm,
7418 "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
7419 slave_crtc->base.base.id, slave_crtc->base.name,
7420 master_crtc->base.base.id, master_crtc->base.name);
7422 slave_crtc_state->bigjoiner_pipes =
7423 master_crtc_state->bigjoiner_pipes;
7425 ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
/*
 * Tear down the bigjoiner link of @master_crtc: clear the
 * bigjoiner_pipes mask on every slave and on the master, and reset
 * each slave's hw state from its uapi state.
 */
7433 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
7434 struct intel_crtc *master_crtc)
7436 struct drm_i915_private *i915 = to_i915(state->base.dev);
7437 struct intel_crtc_state *master_crtc_state =
7438 intel_atomic_get_new_crtc_state(state, master_crtc);
7439 struct intel_crtc *slave_crtc;
7441 for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
7442 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
7443 struct intel_crtc_state *slave_crtc_state =
7444 intel_atomic_get_new_crtc_state(state, slave_crtc);
7446 slave_crtc_state->bigjoiner_pipes = 0;
7448 intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
7451 master_crtc_state->bigjoiner_pipes = 0;
7455 * DOC: asynchronous flip implementation
7457 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7458 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7459 * Correspondingly, support is currently added for primary plane only.
7461 * Async flip can only change the plane surface address, so anything else
7462 * changing is rejected from the intel_async_flip_check_hw() function.
7463 * Once this check is cleared, flip done interrupt is enabled using
7464 * the intel_crtc_enable_flip_done() function.
7466 * As soon as the surface address register is written, flip done interrupt is
7467 * generated and the requested events are sent to the userspace in the interrupt
7468 * handler itself. The timestamp and sequence sent during the flip done event
7469 * correspond to the last vblank and have no relation to the actual time when
7470 * the flip done event was sent.
/*
 * uapi-level validation of an async flip request, run before
 * compute_config: the crtc must be active with no modeset pending, and
 * every plane of the crtc in the state must support async flips and
 * have both an old and a new framebuffer. Error paths (elided here)
 * reject the commit.
 */
7472 static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
7473 struct intel_crtc *crtc)
7475 struct drm_i915_private *i915 = to_i915(state->base.dev);
7476 const struct intel_crtc_state *new_crtc_state =
7477 intel_atomic_get_new_crtc_state(state, crtc);
7478 const struct intel_plane_state *old_plane_state;
7479 struct intel_plane_state *new_plane_state;
7480 struct intel_plane *plane;
7483 if (!new_crtc_state->uapi.async_flip)
7486 if (!new_crtc_state->uapi.active) {
7487 drm_dbg_kms(&i915->drm,
7488 "[CRTC:%d:%s] not active\n",
7489 crtc->base.base.id, crtc->base.name);
7493 if (intel_crtc_needs_modeset(new_crtc_state)) {
7494 drm_dbg_kms(&i915->drm,
7495 "[CRTC:%d:%s] modeset required\n",
7496 crtc->base.base.id, crtc->base.name);
7500 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7501 new_plane_state, i) {
7502 if (plane->pipe != crtc->pipe)
7506 * TODO: Async flip is only supported through the page flip IOCTL
7507 * as of now. So support currently added for primary plane only.
7508 * Support for other planes on platforms on which supports
7509 * this(vlv/chv and icl+) should be added when async flip is
7510 * enabled in the atomic IOCTL path.
7512 if (!plane->async_flip) {
7513 drm_dbg_kms(&i915->drm,
7514 "[PLANE:%d:%s] async flip not supported\n",
7515 plane->base.base.id, plane->base.name);
7519 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
7520 drm_dbg_kms(&i915->drm,
7521 "[PLANE:%d:%s] no old or new framebuffer\n",
7522 plane->base.base.id, plane->base.name);
/*
 * hw-level validation of an async flip, run after compute_config: an
 * async flip may only change the plane surface address, so every other
 * plane/crtc property (modifier, format, stride, rotation, size,
 * alpha, blend mode, color encoding/range, decryption) must be
 * unchanged between the old and new state, the crtc must stay active
 * with no modeset and an identical active-plane set, and the fb
 * modifier must be one of the async-capable tiled formats.
 * Error paths (elided in this view) reject the commit.
 */
7530 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
7532 struct drm_i915_private *i915 = to_i915(state->base.dev);
7533 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7534 const struct intel_plane_state *new_plane_state, *old_plane_state;
7535 struct intel_plane *plane;
7538 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
7539 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
7541 if (!new_crtc_state->uapi.async_flip)
7544 if (!new_crtc_state->hw.active) {
7545 drm_dbg_kms(&i915->drm,
7546 "[CRTC:%d:%s] not active\n",
7547 crtc->base.base.id, crtc->base.name);
7551 if (intel_crtc_needs_modeset(new_crtc_state)) {
7552 drm_dbg_kms(&i915->drm,
7553 "[CRTC:%d:%s] modeset required\n",
7554 crtc->base.base.id, crtc->base.name);
7558 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
7559 drm_dbg_kms(&i915->drm,
7560 "[CRTC:%d:%s] Active planes cannot be in async flip\n",
7561 crtc->base.base.id, crtc->base.name);
7565 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7566 new_plane_state, i) {
7567 if (plane->pipe != crtc->pipe)
7571 * Only async flip capable planes should be in the state
7572 * if we're really about to ask the hardware to perform
7573 * an async flip. We should never get this far otherwise.
7575 if (drm_WARN_ON(&i915->drm,
7576 new_crtc_state->do_async_flip && !plane->async_flip))
7580 * Only check async flip capable planes other planes
7581 * may be involved in the initial commit due to
7582 * the wm0/ddb optimization.
7584 * TODO maybe should track which planes actually
7585 * were requested to do the async flip...
7587 if (!plane->async_flip)
7591 * FIXME: This check is kept generic for all platforms.
7592 * Need to verify this for all gen9 platforms to enable
7593 * this selectively if required.
7595 switch (new_plane_state->hw.fb->modifier) {
7596 case I915_FORMAT_MOD_X_TILED:
7597 case I915_FORMAT_MOD_Y_TILED:
7598 case I915_FORMAT_MOD_Yf_TILED:
7599 case I915_FORMAT_MOD_4_TILED:
7602 drm_dbg_kms(&i915->drm,
7603 "[PLANE:%d:%s] Modifier does not support async flips\n",
7604 plane->base.base.id, plane->base.name);
7608 if (new_plane_state->hw.fb->format->num_planes > 1) {
7609 drm_dbg_kms(&i915->drm,
7610 "[PLANE:%d:%s] Planar formats do not support async flips\n",
7611 plane->base.base.id, plane->base.name);
7615 if (old_plane_state->view.color_plane[0].mapping_stride !=
7616 new_plane_state->view.color_plane[0].mapping_stride) {
7617 drm_dbg_kms(&i915->drm,
7618 "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
7619 plane->base.base.id, plane->base.name);
7623 if (old_plane_state->hw.fb->modifier !=
7624 new_plane_state->hw.fb->modifier) {
7625 drm_dbg_kms(&i915->drm,
7626 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
7627 plane->base.base.id, plane->base.name);
7631 if (old_plane_state->hw.fb->format !=
7632 new_plane_state->hw.fb->format) {
7633 drm_dbg_kms(&i915->drm,
7634 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
7635 plane->base.base.id, plane->base.name);
7639 if (old_plane_state->hw.rotation !=
7640 new_plane_state->hw.rotation) {
7641 drm_dbg_kms(&i915->drm,
7642 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
7643 plane->base.base.id, plane->base.name);
7647 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
7648 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
7649 drm_dbg_kms(&i915->drm,
7650 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
7651 plane->base.base.id, plane->base.name);
7655 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
7656 drm_dbg_kms(&i915->drm,
/* fix: use the "[PLANE:..." tag like every other plane message
 * (was "[PLANES:...", which broke log consistency/grepping) */
7657 "[PLANE:%d:%s] Alpha value cannot be changed in async flip\n",
7658 plane->base.base.id, plane->base.name);
7662 if (old_plane_state->hw.pixel_blend_mode !=
7663 new_plane_state->hw.pixel_blend_mode) {
7664 drm_dbg_kms(&i915->drm,
7665 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
7666 plane->base.base.id, plane->base.name);
7670 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
7671 drm_dbg_kms(&i915->drm,
7672 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
7673 plane->base.base.id, plane->base.name);
7677 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
7678 drm_dbg_kms(&i915->drm,
7679 "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
7680 plane->base.base.id, plane->base.name);
7684 /* plane decryption is allowed to change only in synchronous flips */
7685 if (old_plane_state->decrypt != new_plane_state->decrypt) {
7686 drm_dbg_kms(&i915->drm,
7687 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
7688 plane->base.base.id, plane->base.name);
/*
 * Pull every crtc affected by bigjoiner into @state: add all partner
 * crtcs of any crtc already in the state, force a modeset (plus
 * connectors and planes) on partners of modesetting crtcs, and tear
 * down stale bigjoiner links of modesetting masters so they can be
 * re-established during this commit.
 */
7696 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
7698 struct drm_i915_private *i915 = to_i915(state->base.dev);
7699 struct intel_crtc_state *crtc_state;
7700 struct intel_crtc *crtc;
7701 u8 affected_pipes = 0;
7702 u8 modeset_pipes = 0;
7705 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7706 affected_pipes |= crtc_state->bigjoiner_pipes;
7707 if (intel_crtc_needs_modeset(crtc_state))
7708 modeset_pipes |= crtc_state->bigjoiner_pipes;
7711 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
7712 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7713 if (IS_ERR(crtc_state))
7714 return PTR_ERR(crtc_state);
7717 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
7720 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
7722 crtc_state->uapi.mode_changed = true;
7724 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
7728 ret = intel_atomic_add_affected_planes(state, crtc);
7733 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7734 /* Kill old bigjoiner link, we may re-establish afterwards */
7735 if (intel_crtc_needs_modeset(crtc_state) &&
7736 intel_crtc_is_bigjoiner_master(crtc_state))
7737 kill_bigjoiner_slave(state, crtc);
7744 * intel_atomic_check - validate state object
 * @dev: drm device
7746 * @_state: state to validate
 *
 * i915 backend for drm_mode_config_funcs.atomic_check. Validates the whole
 * atomic state: decides modeset vs. fastset per crtc, computes pipe config,
 * and runs plane/watermark/bandwidth/cdclk checks.
 * NOTE(review): excerpt — embedded numbering shows elided lines (returns,
 * braces, some declarations such as 'ret'/'i'); comments cover visible code.
7748 static int intel_atomic_check(struct drm_device *dev,
7749 struct drm_atomic_state *_state)
7751 struct drm_i915_private *dev_priv = to_i915(dev);
7752 struct intel_atomic_state *state = to_intel_atomic_state(_state);
7753 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7754 struct intel_crtc *crtc;
7756 bool any_ms = false;
/* Escalate inherited-state and scaling-filter changes to a full modeset. */
7758 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7759 new_crtc_state, i) {
7760 if (new_crtc_state->inherited != old_crtc_state->inherited)
7761 new_crtc_state->uapi.mode_changed = true;
7763 if (new_crtc_state->uapi.scaling_filter !=
7764 old_crtc_state->uapi.scaling_filter)
7765 new_crtc_state->uapi.mode_changed = true;
7768 intel_vrr_check_modeset(state);
7770 ret = drm_atomic_helper_check_modeset(dev, &state->base);
/* uapi-level async-flip validation per crtc. */
7774 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7775 ret = intel_async_flip_check_uapi(state, crtc);
7780 ret = intel_bigjoiner_add_affected_crtcs(state);
/* Compute a fresh hw crtc state for every crtc that needs a modeset. */
7784 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7785 new_crtc_state, i) {
7786 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7787 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
7788 copy_bigjoiner_crtc_state_nomodeset(state, crtc);
7790 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
/* Bigjoiner slaves must never be enabled directly via uapi. */
7794 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
7795 drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
7799 ret = intel_crtc_prepare_cleared_state(state, crtc);
7803 if (!new_crtc_state->hw.enable)
7806 ret = intel_modeset_pipe_config(state, new_crtc_state);
7810 ret = intel_atomic_check_bigjoiner(state, crtc);
/* Late pipe-config computation and fastset detection for modeset crtcs. */
7815 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7816 new_crtc_state, i) {
7817 if (!intel_crtc_needs_modeset(new_crtc_state))
7820 ret = intel_modeset_pipe_config_late(new_crtc_state);
7824 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
7828 * Check if fastset is allowed by external dependencies like other
7829 * pipes and transcoders.
7831 * Right now it only forces a fullmodeset when the MST master
7832 * transcoder did not changed but the pipe of the master transcoder
7833 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
7834 * in case of port synced crtcs, if one of the synced crtcs
7835 * needs a full modeset, all other synced crtcs should be
7836 * forced a full modeset.
7838 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7839 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7842 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7843 enum transcoder master = new_crtc_state->mst_master_transcoder;
7845 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7846 new_crtc_state->uapi.mode_changed = true;
7847 new_crtc_state->update_pipe = false;
7851 if (is_trans_port_sync_mode(new_crtc_state)) {
7852 u8 trans = new_crtc_state->sync_mode_slaves_mask;
7854 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7855 trans |= BIT(new_crtc_state->master_transcoder);
7857 if (intel_cpu_transcoders_need_modeset(state, trans)) {
7858 new_crtc_state->uapi.mode_changed = true;
7859 new_crtc_state->update_pipe = false;
/* Bigjoiner partners must modeset together. */
7863 if (new_crtc_state->bigjoiner_pipes) {
7864 if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
7865 new_crtc_state->uapi.mode_changed = true;
7866 new_crtc_state->update_pipe = false;
/* For fastsets, copy the unchanging hw state from the old crtc state. */
7871 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7872 new_crtc_state, i) {
7873 if (intel_crtc_needs_modeset(new_crtc_state)) {
7878 if (!new_crtc_state->update_pipe)
7881 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
7884 if (any_ms && !check_digital_port_conflicts(state)) {
7885 drm_dbg_kms(&dev_priv->drm,
7886 "rejecting conflicting digital port configuration\n");
/* Global checks: MST, planes, watermarks, bandwidth, cdclk. */
7891 ret = drm_dp_mst_atomic_check(&state->base);
7895 ret = intel_atomic_check_planes(state);
7899 ret = intel_compute_global_watermarks(state);
7903 ret = intel_bw_atomic_check(state);
7907 ret = intel_cdclk_atomic_check(state, &any_ms);
7911 if (intel_any_crtc_needs_modeset(state))
7915 ret = intel_modeset_checks(state);
7919 ret = intel_modeset_calc_cdclk(state);
7923 intel_modeset_clear_plls(state);
7926 ret = intel_atomic_check_crtcs(state);
7930 ret = intel_fbc_atomic_check(state);
/* hw-level async-flip checks and debug dump of the final config. */
7934 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7935 new_crtc_state, i) {
7936 ret = intel_async_flip_check_hw(state, crtc);
7940 if (!intel_crtc_needs_modeset(new_crtc_state) &&
7941 !new_crtc_state->update_pipe)
7944 intel_dump_pipe_config(new_crtc_state, state,
7945 intel_crtc_needs_modeset(new_crtc_state) ?
7946 "[modeset]" : "[fastset]");
/* Error path (presumably 'fail:' label in the elided lines — verify). */
7952 if (ret == -EDEADLK)
7956 * FIXME would probably be nice to know which crtc specifically
7957 * caused the failure, in cases where we can pinpoint it.
7959 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7961 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
/*
 * intel_atomic_prepare_commit() - pin/prepare planes and pre-build DSB
 * command buffers for crtcs whose pipe or color state will change.
 * NOTE(review): excerpt — some lines (error handling, return) are elided.
 */
7966 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
7968 struct intel_crtc_state *crtc_state;
7969 struct intel_crtc *crtc;
7972 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
7976 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7977 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
/* DSB is only needed when the pipe/color state is actually rewritten. */
7979 if (mode_changed || crtc_state->update_pipe ||
7980 crtc_state->uapi.color_mgmt_changed) {
7981 intel_dsb_prepare(crtc_state);
/*
 * intel_crtc_arm_fifo_underrun() - (re-)enable CPU and, when a PCH encoder
 * is present, PCH FIFO underrun reporting for @crtc.
 */
7988 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
7989 struct intel_crtc_state *crtc_state)
7991 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* On display ver 2 only arm when at least one plane is active (see also the
 * gen2 note in commit_tail); other platforms always arm. */
7993 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
7994 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
7996 if (crtc_state->has_pch_encoder) {
7997 enum pipe pch_transcoder =
7998 intel_crtc_pch_transcoder(crtc);
8000 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
/*
 * intel_pipe_fastset() - reprogram pipe source size, panel fitter and
 * linetime watermark during a fastset (no full modeset).
 */
8004 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
8005 const struct intel_crtc_state *new_crtc_state)
8007 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
8008 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8011 * Update pipe size and adjust fitter if needed: the reason for this is
8012 * that in compute_mode_changes we check the native mode (not the pfit
8013 * mode) to see if we can flip rather than do a full mode set. In the
8014 * fastboot case, we'll flip, but if we don't update the pipesrc and
8015 * pfit state, we'll end up with a big fb scanned out into the wrong
 * sized surface.
8018 intel_set_pipe_src_size(new_crtc_state);
8020 /* on skylake this is done by detaching scalers */
8021 if (DISPLAY_VER(dev_priv) >= 9) {
8022 if (new_crtc_state->pch_pfit.enabled)
8023 skl_pfit_enable(new_crtc_state);
8024 } else if (HAS_PCH_SPLIT(dev_priv)) {
8025 if (new_crtc_state->pch_pfit.enabled)
8026 ilk_pfit_enable(new_crtc_state);
8027 else if (old_crtc_state->pch_pfit.enabled)
8028 ilk_pfit_disable(old_crtc_state);
8032 * The register is supposedly single buffered so perhaps
8033 * not 100% correct to do this here. But SKL+ calculate
8034 * this based on the adjust pixel rate so pfit changes do
8035 * affect it and so it must be updated for fastsets.
8036 * HSW/BDW only really need this here for fastboot, after
8037 * that the value should not change without a full modeset.
8039 if (DISPLAY_VER(dev_priv) >= 9 ||
8040 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8041 hsw_set_linetime_wm(new_crtc_state);
/*
 * commit_pipe_pre_planes() - per-pipe register updates that must land
 * before the plane registers are armed (runs inside the vblank evasion
 * critical section, see intel_update_crtc()).
 */
8044 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
8045 struct intel_crtc *crtc)
8047 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8048 const struct intel_crtc_state *old_crtc_state =
8049 intel_atomic_get_old_crtc_state(state, crtc);
8050 const struct intel_crtc_state *new_crtc_state =
8051 intel_atomic_get_new_crtc_state(state, crtc);
8052 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8055 * During modesets pipe configuration was programmed as the
 * crtc was enabled; here we only handle the fastset/color cases.
8059 if (new_crtc_state->uapi.color_mgmt_changed ||
8060 new_crtc_state->update_pipe)
8061 intel_color_commit_arm(new_crtc_state);
8063 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
8064 bdw_set_pipemisc(new_crtc_state);
8066 if (new_crtc_state->update_pipe)
8067 intel_pipe_fastset(old_crtc_state, new_crtc_state);
8070 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
8072 intel_atomic_update_watermarks(state, crtc);
/*
 * commit_pipe_post_planes() - per-pipe register updates that must land
 * after the plane registers have been armed.
 */
8075 static void commit_pipe_post_planes(struct intel_atomic_state *state,
8076 struct intel_crtc *crtc)
8078 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8079 const struct intel_crtc_state *new_crtc_state =
8080 intel_atomic_get_new_crtc_state(state, crtc);
8083 * Disable the scaler(s) after the plane(s) so that we don't
8084 * get a catastrophic underrun even if the two operations
8085 * end up happening in two different frames.
8087 if (DISPLAY_VER(dev_priv) >= 9 &&
8088 !intel_crtc_needs_modeset(new_crtc_state))
8089 skl_detach_scalers(new_crtc_state);
/*
 * intel_enable_crtc() - bring up a crtc during a commit; no-op unless the
 * crtc is undergoing a full modeset.
 */
8092 static void intel_enable_crtc(struct intel_atomic_state *state,
8093 struct intel_crtc *crtc)
8095 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8096 const struct intel_crtc_state *new_crtc_state =
8097 intel_atomic_get_new_crtc_state(state, crtc);
8099 if (!intel_crtc_needs_modeset(new_crtc_state))
8102 intel_crtc_update_active_timings(new_crtc_state);
/* Platform-specific enable hook (set up at driver init). */
8104 dev_priv->display->crtc_enable(state, crtc);
/* Bigjoiner slaves bail early here (elided return path — verify). */
8106 if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
8109 /* vblanks work again, re-enable pipe CRC. */
8110 intel_crtc_enable_pipe_crc(crtc);
/*
 * intel_update_crtc() - program all plane/pipe updates for one crtc,
 * wrapping the arming writes in vblank evasion
 * (intel_pipe_update_start/end).
 * NOTE(review): excerpt — some lines elided per embedded numbering.
 */
8113 static void intel_update_crtc(struct intel_atomic_state *state,
8114 struct intel_crtc *crtc)
8116 struct drm_i915_private *i915 = to_i915(state->base.dev);
8117 const struct intel_crtc_state *old_crtc_state =
8118 intel_atomic_get_old_crtc_state(state, crtc);
8119 struct intel_crtc_state *new_crtc_state =
8120 intel_atomic_get_new_crtc_state(state, crtc);
8121 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/* Preload LUTs ahead of the critical section when color state changes. */
8124 if (new_crtc_state->preload_luts &&
8125 (new_crtc_state->uapi.color_mgmt_changed ||
8126 new_crtc_state->update_pipe))
8127 intel_color_load_luts(new_crtc_state);
8129 intel_pre_plane_update(state, crtc);
8131 if (new_crtc_state->update_pipe)
8132 intel_encoders_update_pipe(state, crtc);
8134 if (DISPLAY_VER(i915) >= 11 &&
8135 new_crtc_state->update_pipe)
8136 icl_set_pipe_chicken(new_crtc_state);
8139 intel_fbc_update(state, crtc);
/* Non-arming (double-buffered) color writes before vblank evasion. */
8142 (new_crtc_state->uapi.color_mgmt_changed ||
8143 new_crtc_state->update_pipe))
8144 intel_color_commit_noarm(new_crtc_state);
8146 intel_crtc_planes_update_noarm(state, crtc);
8148 /* Perform vblank evasion around commit operation */
8149 intel_pipe_update_start(new_crtc_state);
8151 commit_pipe_pre_planes(state, crtc);
8153 intel_crtc_planes_update_arm(state, crtc);
8155 commit_pipe_post_planes(state, crtc);
8157 intel_pipe_update_end(new_crtc_state);
8160 * We usually enable FIFO underrun interrupts as part of the
8161 * CRTC enable sequence during modesets. But when we inherit a
8162 * valid pipe configuration from the BIOS we need to take care
8163 * of enabling them on the CRTC's first fastset.
8165 if (new_crtc_state->update_pipe && !modeset &&
8166 old_crtc_state->inherited)
8167 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * intel_old_crtc_state_disables() - fully shut down one crtc: pipe CRC,
 * pipe itself, FBC and its shared DPLL.
 */
8170 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
8171 struct intel_crtc_state *old_crtc_state,
8172 struct intel_crtc_state *new_crtc_state,
8173 struct intel_crtc *crtc)
8175 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8178 * We need to disable pipe CRC before disabling the pipe,
8179 * or we race against vblank off.
8181 intel_crtc_disable_pipe_crc(crtc);
8183 dev_priv->display->crtc_disable(state, crtc);
8184 crtc->active = false;
8185 intel_fbc_disable(crtc);
8186 intel_disable_shared_dpll(old_crtc_state);
8188 /* FIXME unify this for all platforms */
8189 if (!new_crtc_state->hw.active &&
8190 !HAS_GMCH(dev_priv))
8191 intel_initial_watermarks(state, crtc);
/*
 * intel_commit_modeset_disables() - disable all crtcs that need a modeset,
 * taking care to disable port-sync/MST/bigjoiner slaves before their
 * masters.
 * NOTE(review): excerpt — 'handled' declaration and some continue paths
 * are elided per the embedded numbering.
 */
8194 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
8196 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8197 struct intel_crtc *crtc;
/* Pass 1: turn off planes on every active crtc being modeset. */
8201 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8202 new_crtc_state, i) {
8203 if (!intel_crtc_needs_modeset(new_crtc_state))
8206 if (!old_crtc_state->hw.active)
8209 intel_pre_plane_update(state, crtc);
8210 intel_crtc_disable_planes(state, crtc);
8213 /* Only disable port sync and MST slaves */
8214 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8215 new_crtc_state, i) {
8216 if (!intel_crtc_needs_modeset(new_crtc_state))
8219 if (!old_crtc_state->hw.active)
8222 /* In case of Transcoder port Sync master slave CRTCs can be
8223 * assigned in any order and we need to make sure that
8224 * slave CRTCs are disabled first and then master CRTC since
8225 * Slave vblanks are masked till Master Vblanks.
8227 if (!is_trans_port_sync_slave(old_crtc_state) &&
8228 !intel_dp_mst_is_slave_trans(old_crtc_state) &&
8229 !intel_crtc_is_bigjoiner_slave(old_crtc_state))
8232 intel_old_crtc_state_disables(state, old_crtc_state,
8233 new_crtc_state, crtc);
8234 handled |= BIT(crtc->pipe);
8237 /* Disable everything else left on */
8238 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8239 new_crtc_state, i) {
8240 if (!intel_crtc_needs_modeset(new_crtc_state) ||
8241 (handled & BIT(crtc->pipe)))
8244 if (!old_crtc_state->hw.active)
8247 intel_old_crtc_state_disables(state, old_crtc_state,
8248 new_crtc_state, crtc);
/*
 * intel_commit_modeset_enables() - simple (pre-skl) enable path: enable
 * and update every crtc that will be active, in state order.
 */
8252 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8254 struct intel_crtc_state *new_crtc_state;
8255 struct intel_crtc *crtc;
8258 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8259 if (!new_crtc_state->hw.active)
8262 intel_enable_crtc(state, crtc);
8263 intel_update_crtc(state, crtc);
/*
 * skl_commit_modeset_enables() - skl+ enable path that orders pipe updates
 * so their DDB (data buffer) allocations never overlap between steps.
 * NOTE(review): excerpt — some continue/brace lines elided per numbering.
 */
8267 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
8269 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8270 struct intel_crtc *crtc;
8271 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8272 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
8273 u8 update_pipes = 0, modeset_pipes = 0;
/* Classify active pipes: fastset-only updates vs. full modesets; seed the
 * DDB tracking with the old allocations of the fastset pipes. */
8276 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8277 enum pipe pipe = crtc->pipe;
8279 if (!new_crtc_state->hw.active)
8282 /* ignore allocations for crtc's that have been turned off. */
8283 if (!intel_crtc_needs_modeset(new_crtc_state)) {
8284 entries[pipe] = old_crtc_state->wm.skl.ddb;
8285 update_pipes |= BIT(pipe);
8287 modeset_pipes |= BIT(pipe);
8292 * Whenever the number of active pipes changes, we need to make sure we
8293 * update the pipes in the right order so that their ddb allocations
8294 * never overlap with each other between CRTC updates. Otherwise we'll
8295 * cause pipe underruns and other bad stuff.
8297 * So first lets enable all pipes that do not need a fullmodeset as
8298 * those don't have any external dependency.
8300 while (update_pipes) {
8301 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8302 new_crtc_state, i) {
8303 enum pipe pipe = crtc->pipe;
8305 if ((update_pipes & BIT(pipe)) == 0)
/* Defer this pipe while its new DDB still overlaps another pipe's. */
8308 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8309 entries, I915_MAX_PIPES, pipe))
8312 entries[pipe] = new_crtc_state->wm.skl.ddb;
8313 update_pipes &= ~BIT(pipe);
8315 intel_update_crtc(state, crtc);
8318 * If this is an already active pipe, it's DDB changed,
8319 * and this isn't the last pipe that needs updating
8320 * then we need to wait for a vblank to pass for the
8321 * new ddb allocation to take effect.
8323 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
8324 &old_crtc_state->wm.skl.ddb) &&
8325 (update_pipes | modeset_pipes))
8326 intel_crtc_wait_for_next_vblank(crtc);
8330 update_pipes = modeset_pipes;
8333 * Enable all pipes that needs a modeset and do not depends on other
 * pipes.
8336 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8337 enum pipe pipe = crtc->pipe;
8339 if ((modeset_pipes & BIT(pipe)) == 0)
/* Dependent pipes (MST slaves, sync masters, bigjoiner masters) wait. */
8342 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
8343 is_trans_port_sync_master(new_crtc_state) ||
8344 intel_crtc_is_bigjoiner_master(new_crtc_state))
8347 modeset_pipes &= ~BIT(pipe);
8349 intel_enable_crtc(state, crtc);
8353 * Then we enable all remaining pipes that depend on other
8354 * pipes: MST slaves and port sync masters, big joiner master
 * pipes.
8356 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8357 enum pipe pipe = crtc->pipe;
8359 if ((modeset_pipes & BIT(pipe)) == 0)
8362 modeset_pipes &= ~BIT(pipe);
8364 intel_enable_crtc(state, crtc);
8368 * Finally we do the plane updates/etc. for all pipes that got enabled.
8370 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8371 enum pipe pipe = crtc->pipe;
8373 if ((update_pipes & BIT(pipe)) == 0)
8376 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8377 entries, I915_MAX_PIPES, pipe));
8379 entries[pipe] = new_crtc_state->wm.skl.ddb;
8380 update_pipes &= ~BIT(pipe);
8382 intel_update_crtc(state, crtc);
/* Every pipe must have been consumed by one of the phases above. */
8385 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
8386 drm_WARN_ON(&dev_priv->drm, update_pipes);
/*
 * intel_atomic_helper_free_state() - drop the references of all atomic
 * states queued on the lock-free free_list.
 */
8389 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8391 struct intel_atomic_state *state, *next;
8392 struct llist_node *freed;
8394 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8395 llist_for_each_entry_safe(state, next, freed, freed)
8396 drm_atomic_state_put(&state->base);
/* Workqueue entry point for deferred atomic-state freeing. */
8399 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8401 struct drm_i915_private *dev_priv =
8402 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8404 intel_atomic_helper_free_state(dev_priv);
/*
 * intel_atomic_commit_fence_wait() - block until the commit's sw fence is
 * signalled or a GPU reset requiring a modeset is flagged, whichever comes
 * first (wakes on either waitqueue).
 * NOTE(review): excerpt — the wait loop body/schedule() lines are elided.
 */
8407 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
8409 struct wait_queue_entry wait_fence, wait_reset;
8410 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
8412 init_wait_entry(&wait_fence, 0);
8413 init_wait_entry(&wait_reset, 0);
8415 prepare_to_wait(&intel_state->commit_ready.wait,
8416 &wait_fence, TASK_UNINTERRUPTIBLE);
8417 prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8418 I915_RESET_MODESET),
8419 &wait_reset, TASK_UNINTERRUPTIBLE);
/* Exit condition: fence done, or a modeset-reset is in progress. */
8422 if (i915_sw_fence_done(&intel_state->commit_ready) ||
8423 test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
8428 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
8429 finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8430 I915_RESET_MODESET),
/* Release the DSB buffers stashed on the old crtc states (see the
 * fetch_and_zero() hand-off in intel_atomic_commit_tail()). */
8434 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
8436 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8437 struct intel_crtc *crtc;
8440 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8442 intel_dsb_cleanup(old_crtc_state);
/*
 * intel_atomic_cleanup_work() - deferred post-commit cleanup (queued from
 * intel_atomic_commit_tail()): DSBs, planes, commit bookkeeping, state ref.
 */
8445 static void intel_atomic_cleanup_work(struct work_struct *work)
8447 struct intel_atomic_state *state =
8448 container_of(work, struct intel_atomic_state, base.commit_work);
8449 struct drm_i915_private *i915 = to_i915(state->base.dev);
8451 intel_cleanup_dsbs(state);
8452 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
8453 drm_atomic_helper_commit_cleanup_done(&state->base);
8454 drm_atomic_state_put(&state->base);
8456 intel_atomic_helper_free_state(i915);
/*
 * intel_atomic_prepare_plane_clear_colors() - read the fast-clear color
 * value out of each CCS framebuffer's clear-color plane into the plane
 * state, for later programming.
 * NOTE(review): excerpt — the fb NULL check / continue lines are elided.
 */
8459 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
8461 struct drm_i915_private *i915 = to_i915(state->base.dev);
8462 struct intel_plane *plane;
8463 struct intel_plane_state *plane_state;
8466 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8467 struct drm_framebuffer *fb = plane_state->hw.fb;
8474 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
8479 * The layout of the fast clear color value expected by HW
8480 * (the DRM ABI requiring this value to be located in fb at
8481 * offset 0 of cc plane, plane #2 previous generations or
8482 * plane #1 for flat ccs):
8483 * - 4 x 4 bytes per-channel value
8484 * (in surface type specific float/int format provided by the fb user)
8485 * - 8 bytes native color value used by the display
8486 * (converted/written by GPU during a fast clear operation using the
8487 * above per-channel values)
8489 * The commit's FB prepare hook already ensured that FB obj is pinned and the
8490 * caller made sure that the object is synced wrt. the related color clear value
 * GPU write.
/* Offset 16 skips the 4x4-byte per-channel values to the native color. */
8493 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
8494 fb->offsets[cc_plane] + 16,
8495 &plane_state->ccval,
8496 sizeof(plane_state->ccval));
8497 /* The above could only fail if the FB obj has an unexpected backing store type. */
8498 drm_WARN_ON(&i915->drm, ret);
/*
 * intel_atomic_commit_tail() - perform the hardware part of an atomic
 * commit: disables, enables, plane updates, watermark optimization and
 * cleanup hand-off. Runs inline for blocking commits or from a worker for
 * nonblocking ones (see intel_atomic_commit()).
 * NOTE(review): excerpt — the embedded numbering shows elided lines
 * (several braces and conditions); comments describe only visible code.
 */
8502 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
8504 struct drm_device *dev = state->base.dev;
8505 struct drm_i915_private *dev_priv = to_i915(dev);
8506 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8507 struct intel_crtc *crtc;
8508 u64 put_domains[I915_MAX_PIPES] = {};
8509 intel_wakeref_t wakeref = 0;
8512 intel_atomic_commit_fence_wait(state);
8514 drm_atomic_helper_wait_for_dependencies(&state->base);
8517 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
8519 intel_atomic_prepare_plane_clear_colors(state);
/* Grab the power domains each changing crtc needs; released per-crtc below. */
8521 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8522 new_crtc_state, i) {
8523 if (intel_crtc_needs_modeset(new_crtc_state) ||
8524 new_crtc_state->update_pipe) {
8526 put_domains[crtc->pipe] =
8527 modeset_get_crtc_power_domains(new_crtc_state);
8531 intel_commit_modeset_disables(state);
8533 /* FIXME: Eventually get rid of our crtc->config pointer */
8534 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8535 crtc->config = new_crtc_state;
8537 if (state->modeset) {
8538 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
8540 intel_set_cdclk_pre_plane_update(state);
8542 intel_modeset_verify_disabled(dev_priv, state);
8545 intel_sagv_pre_plane_update(state);
8547 /* Complete the events for pipes that have now been disabled */
8548 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8549 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8551 /* Complete events for now disable pipes here. */
8552 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
8553 spin_lock_irq(&dev->event_lock);
8554 drm_crtc_send_vblank_event(&crtc->base,
8555 new_crtc_state->uapi.event);
8556 spin_unlock_irq(&dev->event_lock);
8558 new_crtc_state->uapi.event = NULL;
8562 intel_encoders_update_prepare(state);
8564 intel_dbuf_pre_plane_update(state);
8565 intel_mbus_dbox_update(state);
/* Arm flip-done interrupts for async flips before enabling. */
8567 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8568 if (new_crtc_state->do_async_flip)
8569 intel_crtc_enable_flip_done(state, crtc);
8572 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
8573 dev_priv->display->commit_modeset_enables(state);
8575 intel_encoders_update_complete(state);
8578 intel_set_cdclk_post_plane_update(state);
8580 intel_wait_for_vblank_workers(state);
8582 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
8583 * already, but still need the state for the delayed optimization. To
 * fix this:
8585 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
8586 * - schedule that vblank worker _before_ calling hw_done
8587 * - at the start of commit_tail, cancel it _synchrously
8588 * - switch over to the vblank wait helper in the core after that since
8589 * we don't need out special handling any more.
8591 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
8593 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8594 if (new_crtc_state->do_async_flip)
8595 intel_crtc_disable_flip_done(state, crtc);
8599 * Now that the vblank has passed, we can go ahead and program the
8600 * optimal watermarks on platforms that need two-step watermark
 * programming.
8603 * TODO: Move this (and other cleanup) to an async worker eventually.
8605 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8606 new_crtc_state, i) {
8608 * Gen2 reports pipe underruns whenever all planes are disabled.
8609 * So re-enable underrun reporting after some planes get enabled.
8611 * We do this before .optimize_watermarks() so that we have a
8612 * chance of catching underruns with the intermediate watermarks
8613 * vs. the new plane configuration.
8615 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
8616 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8618 intel_optimize_watermarks(state, crtc);
8621 intel_dbuf_post_plane_update(state);
8622 intel_psr_post_plane_update(state);
8624 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8625 intel_post_plane_update(state, crtc);
8627 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
8629 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
8632 * DSB cleanup is done in cleanup_work aligning with framebuffer
8633 * cleanup. So copy and reset the dsb structure to sync with
8634 * commit_done and later do dsb cleanup in cleanup_work.
8636 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
8639 /* Underruns don't always raise interrupts, so check manually */
8640 intel_check_cpu_fifo_underruns(dev_priv);
8641 intel_check_pch_fifo_underruns(dev_priv);
8644 intel_verify_planes(state);
8646 intel_sagv_post_plane_update(state);
8648 drm_atomic_helper_commit_hw_done(&state->base);
8650 if (state->modeset) {
8651 /* As one of the primary mmio accessors, KMS has a high
8652 * likelihood of triggering bugs in unclaimed access. After we
8653 * finish modesetting, see if an error has been flagged, and if
8654 * so enable debugging for the next modeset - and hope we catch
 * the culprit.
8657 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
8658 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
8660 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8663 * Defer the cleanup of the old state to a separate worker to not
8664 * impede the current task (userspace for blocking modesets) that
8665 * are executed inline. For out-of-line asynchronous modesets/flips,
8666 * deferring to a new worker seems overkill, but we would place a
8667 * schedule point (cond_resched()) here anyway to keep latencies
 * down.
8670 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
8671 queue_work(system_highpri_wq, &state->base.commit_work);
/* Workqueue entry point for nonblocking commits: run the commit tail. */
8674 static void intel_atomic_commit_work(struct work_struct *work)
8676 struct intel_atomic_state *state =
8677 container_of(work, struct intel_atomic_state, base.commit_work);
8679 intel_atomic_commit_tail(state);
/*
 * intel_atomic_commit_ready() - i915_sw_fence notify callback for the
 * commit fence. On FENCE_COMPLETE nothing is done here (the commit worker
 * waits itself); the other visible branch (presumably FENCE_FREE — the
 * case label is elided in this excerpt) queues the state for deferred
 * freeing.
 */
8683 intel_atomic_commit_ready(struct i915_sw_fence *fence,
8684 enum i915_sw_fence_notify notify)
8686 struct intel_atomic_state *state =
8687 container_of(fence, struct intel_atomic_state, commit_ready);
8690 case FENCE_COMPLETE:
8691 /* we do blocking waits in the worker, nothing to do here */
8695 struct intel_atomic_helper *helper =
8696 &to_i915(state->base.dev)->atomic_helper;
/* Only the first llist_add needs to kick the worker. */
8698 if (llist_add(&state->freed, &helper->free_list))
8699 schedule_work(&helper->free_work);
/*
 * intel_atomic_track_fbs() - move frontbuffer tracking from each plane's
 * old framebuffer to its new one.
 */
8707 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
8709 struct intel_plane_state *old_plane_state, *new_plane_state;
8710 struct intel_plane *plane;
8713 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
8715 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
8716 to_intel_frontbuffer(new_plane_state->hw.fb),
8717 plane->frontbuffer_bit);
/*
 * intel_atomic_commit() - i915 backend for drm atomic_commit: sets up the
 * commit fence, swaps state, and runs the tail either inline (blocking) or
 * via a workqueue (nonblocking; modeset vs. flip queues).
 * NOTE(review): excerpt — some parameters (nonblock), declarations and
 * error/return lines are elided per the embedded numbering.
 */
8720 static int intel_atomic_commit(struct drm_device *dev,
8721 struct drm_atomic_state *_state,
8724 struct intel_atomic_state *state = to_intel_atomic_state(_state);
8725 struct drm_i915_private *dev_priv = to_i915(dev);
8728 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
8730 drm_atomic_state_get(&state->base);
8731 i915_sw_fence_init(&state->commit_ready,
8732 intel_atomic_commit_ready);
8735 * The intel_legacy_cursor_update() fast path takes care
8736 * of avoiding the vblank waits for simple cursor
8737 * movement and flips. For cursor on/off and size changes,
8738 * we want to perform the vblank waits so that watermark
8739 * updates happen during the correct frames. Gen9+ have
8740 * double buffered watermarks and so shouldn't need this.
8742 * Unset state->legacy_cursor_update before the call to
8743 * drm_atomic_helper_setup_commit() because otherwise
8744 * drm_atomic_helper_wait_for_flip_done() is a noop and
8745 * we get FIFO underruns because we didn't wait
 * for vblank.
8748 * FIXME doing watermarks and fb cleanup from a vblank worker
8749 * (assuming we had any) would solve these problems.
8751 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
8752 struct intel_crtc_state *new_crtc_state;
8753 struct intel_crtc *crtc;
8756 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8757 if (new_crtc_state->wm.need_postvbl_update ||
8758 new_crtc_state->update_wm_post)
8759 state->base.legacy_cursor_update = false;
8762 ret = intel_atomic_prepare_commit(state);
/* Preparation failure: release the fence and runtime pm we took above. */
8764 drm_dbg_atomic(&dev_priv->drm,
8765 "Preparing state failed with %i\n", ret);
8766 i915_sw_fence_commit(&state->commit_ready);
8767 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8771 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
8773 ret = drm_atomic_helper_swap_state(&state->base, true);
8775 intel_atomic_swap_global_state(state);
/* Swap-failure unwind path: clean up DSBs and planes. */
8778 struct intel_crtc_state *new_crtc_state;
8779 struct intel_crtc *crtc;
8782 i915_sw_fence_commit(&state->commit_ready);
8784 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8785 intel_dsb_cleanup(new_crtc_state);
8787 drm_atomic_helper_cleanup_planes(dev, &state->base);
8788 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8791 intel_shared_dpll_swap_state(state);
8792 intel_atomic_track_fbs(state);
8794 drm_atomic_state_get(&state->base);
8795 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
8797 i915_sw_fence_commit(&state->commit_ready);
/* Nonblocking modesets use the ordered modeset_wq; flips use flip_wq;
 * blocking commits flush pending modesets then run the tail inline. */
8798 if (nonblock && state->modeset) {
8799 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
8800 } else if (nonblock) {
8801 queue_work(dev_priv->flip_wq, &state->base.commit_work);
8804 flush_workqueue(dev_priv->modeset_wq);
8805 intel_atomic_commit_tail(state);
8812 * intel_plane_destroy - destroy a plane
8813 * @plane: plane to destroy
8815 * Common destruction function for all types of planes (primary, cursor,
 * sprite): cleans up the DRM core object then frees the wrapper.
8818 void intel_plane_destroy(struct drm_plane *plane)
8820 drm_plane_cleanup(plane);
8821 kfree(to_intel_plane(plane));
/* Bind each plane's possible_crtcs mask to the single crtc of its pipe. */
8824 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
8826 struct intel_plane *plane;
8828 for_each_intel_plane(&dev_priv->drm, plane) {
8829 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
8832 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
/*
 * intel_get_pipe_from_crtc_id_ioctl() - legacy i915 ioctl translating a
 * crtc id into its hardware pipe number.
 * NOTE(review): excerpt — the drmmode_crtc NULL check and return are
 * elided per the embedded numbering.
 */
8837 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
8838 struct drm_file *file)
8840 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8841 struct drm_crtc *drmmode_crtc;
8842 struct intel_crtc *crtc;
8844 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
8848 crtc = to_intel_crtc(drmmode_crtc);
8849 pipe_from_crtc_id->pipe = crtc->pipe;
/*
 * intel_encoder_possible_clones() - build the DRM encoder bitmask of all
 * encoders that may be cloned with @encoder.
 */
8854 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8856 struct drm_device *dev = encoder->base.dev;
8857 struct intel_encoder *source_encoder;
8858 u32 possible_clones = 0;
8860 for_each_intel_encoder(dev, source_encoder) {
8861 if (encoders_cloneable(encoder, source_encoder))
8862 possible_clones |= drm_encoder_mask(&source_encoder->base);
8865 return possible_clones;
/*
 * intel_encoder_possible_crtcs() - translate an encoder's pipe_mask into
 * the DRM crtc bitmask it can drive.
 */
8868 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
8870 struct drm_device *dev = encoder->base.dev;
8871 struct intel_crtc *crtc;
8872 u32 possible_crtcs = 0;
8874 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
8875 possible_crtcs |= drm_crtc_mask(&crtc->base);
8877 return possible_crtcs;
/*
 * Whether eDP port A is present on an ILK-era platform: mobile only,
 * DP_A must report detected, and on Ironlake the fuse strap must not
 * disable eDP A.
 */
8880 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
8882 if (!IS_MOBILE(dev_priv))
8885 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
8888 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
/*
 * Whether a CRT (VGA) output exists on a DDI platform (HSW/BDW era).
 * Rules visible below: never on display ver >= 9 or ULT parts; LPT-H
 * fuse strap may disable it; DDI E (which carries CRT) is unavailable
 * when DDI A claims 4 lanes; finally the VBT must declare CRT support.
 */
8894 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
8896 if (DISPLAY_VER(dev_priv) >= 9)
8899 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
8902 if (HAS_PCH_LPT_H(dev_priv) &&
8903 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
8906 /* DDI E can't be used if DDI A requires 4 lanes */
8907 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
8910 if (!dev_priv->vbt.int_crt_support)
/*
 * Probe and register all display outputs (encoders/connectors) for the
 * device. One large per-platform dispatch: newest DDI-based platforms
 * first, then PCH-split, VLV/CHV, and finally legacy gen2-4 paths.
 * Afterwards the possible_crtcs/possible_clones masks are filled in for
 * every registered encoder.
 */
8916 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
8918 struct intel_encoder *encoder;
8919 bool dpd_is_edp = false;
8921 intel_pps_unlock_regs_wa(dev_priv);
8923 if (!HAS_DISPLAY(dev_priv))
/* DDI platforms: register the fixed set of ports each platform exposes. */
8926 if (IS_DG2(dev_priv)) {
8927 intel_ddi_init(dev_priv, PORT_A);
8928 intel_ddi_init(dev_priv, PORT_B);
8929 intel_ddi_init(dev_priv, PORT_C);
8930 intel_ddi_init(dev_priv, PORT_D_XELPD);
8931 intel_ddi_init(dev_priv, PORT_TC1);
8932 } else if (IS_ALDERLAKE_P(dev_priv)) {
8933 intel_ddi_init(dev_priv, PORT_A);
8934 intel_ddi_init(dev_priv, PORT_B);
8935 intel_ddi_init(dev_priv, PORT_TC1);
8936 intel_ddi_init(dev_priv, PORT_TC2);
8937 intel_ddi_init(dev_priv, PORT_TC3);
8938 intel_ddi_init(dev_priv, PORT_TC4);
8939 icl_dsi_init(dev_priv);
8940 } else if (IS_ALDERLAKE_S(dev_priv)) {
8941 intel_ddi_init(dev_priv, PORT_A);
8942 intel_ddi_init(dev_priv, PORT_TC1);
8943 intel_ddi_init(dev_priv, PORT_TC2);
8944 intel_ddi_init(dev_priv, PORT_TC3);
8945 intel_ddi_init(dev_priv, PORT_TC4);
8946 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
8947 intel_ddi_init(dev_priv, PORT_A);
8948 intel_ddi_init(dev_priv, PORT_B);
8949 intel_ddi_init(dev_priv, PORT_TC1);
8950 intel_ddi_init(dev_priv, PORT_TC2);
8951 } else if (DISPLAY_VER(dev_priv) >= 12) {
8952 intel_ddi_init(dev_priv, PORT_A);
8953 intel_ddi_init(dev_priv, PORT_B);
8954 intel_ddi_init(dev_priv, PORT_TC1);
8955 intel_ddi_init(dev_priv, PORT_TC2);
8956 intel_ddi_init(dev_priv, PORT_TC3);
8957 intel_ddi_init(dev_priv, PORT_TC4);
8958 intel_ddi_init(dev_priv, PORT_TC5);
8959 intel_ddi_init(dev_priv, PORT_TC6);
8960 icl_dsi_init(dev_priv);
8961 } else if (IS_JSL_EHL(dev_priv)) {
8962 intel_ddi_init(dev_priv, PORT_A);
8963 intel_ddi_init(dev_priv, PORT_B);
8964 intel_ddi_init(dev_priv, PORT_C);
8965 intel_ddi_init(dev_priv, PORT_D);
8966 icl_dsi_init(dev_priv);
8967 } else if (DISPLAY_VER(dev_priv) == 11) {
8968 intel_ddi_init(dev_priv, PORT_A);
8969 intel_ddi_init(dev_priv, PORT_B);
8970 intel_ddi_init(dev_priv, PORT_C);
8971 intel_ddi_init(dev_priv, PORT_D);
8972 intel_ddi_init(dev_priv, PORT_E);
8973 intel_ddi_init(dev_priv, PORT_F);
8974 icl_dsi_init(dev_priv);
8975 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
8976 intel_ddi_init(dev_priv, PORT_A);
8977 intel_ddi_init(dev_priv, PORT_B);
8978 intel_ddi_init(dev_priv, PORT_C);
8979 vlv_dsi_init(dev_priv);
8980 } else if (DISPLAY_VER(dev_priv) >= 9) {
8981 intel_ddi_init(dev_priv, PORT_A);
8982 intel_ddi_init(dev_priv, PORT_B);
8983 intel_ddi_init(dev_priv, PORT_C);
8984 intel_ddi_init(dev_priv, PORT_D);
8985 intel_ddi_init(dev_priv, PORT_E);
8986 } else if (HAS_DDI(dev_priv)) {
/* HSW/BDW: CRT presence is fused/strapped, other DDIs use detect bits. */
8989 if (intel_ddi_crt_present(dev_priv))
8990 intel_crt_init(dev_priv);
8992 /* Haswell uses DDI functions to detect digital outputs. */
8993 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
8995 intel_ddi_init(dev_priv, PORT_A);
8997 found = intel_de_read(dev_priv, SFUSE_STRAP);
8998 if (found & SFUSE_STRAP_DDIB_DETECTED)
8999 intel_ddi_init(dev_priv, PORT_B);
9000 if (found & SFUSE_STRAP_DDIC_DETECTED)
9001 intel_ddi_init(dev_priv, PORT_C);
9002 if (found & SFUSE_STRAP_DDID_DETECTED)
9003 intel_ddi_init(dev_priv, PORT_D);
9004 if (found & SFUSE_STRAP_DDIF_DETECTED)
9005 intel_ddi_init(dev_priv, PORT_F);
9006 } else if (HAS_PCH_SPLIT(dev_priv)) {
9010 * intel_edp_init_connector() depends on this completing first,
9011 * to prevent the registration of both eDP and LVDS and the
9012 * incorrect sharing of the PPS.
9014 intel_lvds_init(dev_priv);
9015 intel_crt_init(dev_priv);
9017 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
9019 if (ilk_has_edp_a(dev_priv))
9020 g4x_dp_init(dev_priv, DP_A, PORT_A);
9022 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
9023 /* PCH SDVOB multiplex with HDMIB */
9024 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
9026 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
9027 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
9028 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
9031 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
9032 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
9034 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
9035 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
9037 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
9038 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
9040 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
9041 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
9042 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
9043 bool has_edp, has_port;
9045 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
9046 intel_crt_init(dev_priv);
9049 * The DP_DETECTED bit is the latched state of the DDC
9050 * SDA pin at boot. However since eDP doesn't require DDC
9051 * (no way to plug in a DP->HDMI dongle) the DDC pins for
9052 * eDP ports may have been muxed to an alternate function.
9053 * Thus we can't rely on the DP_DETECTED bit alone to detect
9054 * eDP ports. Consult the VBT as well as DP_DETECTED to
9057 * Sadly the straps seem to be missing sometimes even for HDMI
9058 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
9059 * and VBT for the presence of the port. Additionally we can't
9060 * trust the port type the VBT declares as we've seen at least
9061 * HDMI ports that the VBT claim are DP or eDP.
9063 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
9064 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
9065 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
9066 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
9067 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
9068 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
9070 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
9071 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
9072 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
9073 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
9074 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
9075 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
9077 if (IS_CHERRYVIEW(dev_priv)) {
9079 * eDP not supported on port D,
9080 * so no need to worry about it
9082 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
9083 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
9084 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
9085 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
9086 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
9089 vlv_dsi_init(dev_priv);
9090 } else if (IS_PINEVIEW(dev_priv)) {
9091 intel_lvds_init(dev_priv);
9092 intel_crt_init(dev_priv);
9093 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
9096 if (IS_MOBILE(dev_priv))
9097 intel_lvds_init(dev_priv);
9099 intel_crt_init(dev_priv);
9101 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9102 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
9103 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
9104 if (!found && IS_G4X(dev_priv)) {
9105 drm_dbg_kms(&dev_priv->drm,
9106 "probing HDMI on SDVOB\n");
9107 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
9110 if (!found && IS_G4X(dev_priv))
9111 g4x_dp_init(dev_priv, DP_B, PORT_B);
9114 /* Before G4X SDVOC doesn't have its own detect register */
9116 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9117 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
9118 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
9121 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
9123 if (IS_G4X(dev_priv)) {
9124 drm_dbg_kms(&dev_priv->drm,
9125 "probing HDMI on SDVOC\n");
9126 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
9128 if (IS_G4X(dev_priv))
9129 g4x_dp_init(dev_priv, DP_C, PORT_C);
9132 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
9133 g4x_dp_init(dev_priv, DP_D, PORT_D);
9135 if (SUPPORTS_TV(dev_priv))
9136 intel_tv_init(dev_priv);
9137 } else if (DISPLAY_VER(dev_priv) == 2) {
9138 if (IS_I85X(dev_priv))
9139 intel_lvds_init(dev_priv);
9141 intel_crt_init(dev_priv);
9142 intel_dvo_init(dev_priv);
/* Fill in cloning/CRTC capability masks for all encoders created above. */
9145 for_each_intel_encoder(&dev_priv->drm, encoder) {
9146 encoder->base.possible_crtcs =
9147 intel_encoder_possible_crtcs(encoder);
9148 encoder->base.possible_clones =
9149 intel_encoder_possible_clones(encoder);
9152 intel_init_pch_refclk(dev_priv);
/* Keep internal panels first in the connector list for fbdev/userspace. */
9154 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
/*
 * Global .mode_valid hook: reject display modes the hardware can never
 * drive, regardless of connector. Checks unsupported flags first, then
 * per-generation transcoder timing limits, then minimum blanking
 * requirements and a known hsync-offset erratum.
 */
9157 static enum drm_mode_status
9158 intel_mode_valid(struct drm_device *dev,
9159 const struct drm_display_mode *mode)
9161 struct drm_i915_private *dev_priv = to_i915(dev);
9162 int hdisplay_max, htotal_max;
9163 int vdisplay_max, vtotal_max;
9166 * Can't reject DBLSCAN here because Xorg ddxen can add piles
9167 * of DBLSCAN modes to the output's mode list when they detect
9168 * the scaling mode property on the connector. And they don't
9169 * ask the kernel to validate those modes in any way until
9170 * modeset time at which point the client gets a protocol error.
9171 * So in order to not upset those clients we silently ignore the
9172 * DBLSCAN flag on such connectors. For other connectors we will
9173 * reject modes with the DBLSCAN flag in encoder->compute_config().
9174 * And we always reject DBLSCAN modes in connector->mode_valid()
9175 * as we never want such modes on the connector's mode list.
9178 if (mode->vscan > 1)
9179 return MODE_NO_VSCAN;
9181 if (mode->flags & DRM_MODE_FLAG_HSKEW)
9182 return MODE_H_ILLEGAL;
9184 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
9185 DRM_MODE_FLAG_NCSYNC |
9186 DRM_MODE_FLAG_PCSYNC))
9189 if (mode->flags & (DRM_MODE_FLAG_BCAST |
9190 DRM_MODE_FLAG_PIXMUX |
9191 DRM_MODE_FLAG_CLKDIV2))
9194 /* Transcoder timing limits */
9195 if (DISPLAY_VER(dev_priv) >= 11) {
9196 hdisplay_max = 16384;
9197 vdisplay_max = 8192;
9200 } else if (DISPLAY_VER(dev_priv) >= 9 ||
9201 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
9202 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
9203 vdisplay_max = 4096;
9206 } else if (DISPLAY_VER(dev_priv) >= 3) {
9207 hdisplay_max = 4096;
9208 vdisplay_max = 4096;
9212 hdisplay_max = 2048;
9213 vdisplay_max = 2048;
/* All horizontal timing fields must fit within the transcoder limits. */
9218 if (mode->hdisplay > hdisplay_max ||
9219 mode->hsync_start > htotal_max ||
9220 mode->hsync_end > htotal_max ||
9221 mode->htotal > htotal_max)
9222 return MODE_H_ILLEGAL;
9224 if (mode->vdisplay > vdisplay_max ||
9225 mode->vsync_start > vtotal_max ||
9226 mode->vsync_end > vtotal_max ||
9227 mode->vtotal > vtotal_max)
9228 return MODE_V_ILLEGAL;
/* Minimum active size / blanking periods differ before and after gen5. */
9230 if (DISPLAY_VER(dev_priv) >= 5) {
9231 if (mode->hdisplay < 64 ||
9232 mode->htotal - mode->hdisplay < 32)
9233 return MODE_H_ILLEGAL;
9235 if (mode->vtotal - mode->vdisplay < 5)
9236 return MODE_V_ILLEGAL;
9238 if (mode->htotal - mode->hdisplay < 32)
9239 return MODE_H_ILLEGAL;
9241 if (mode->vtotal - mode->vdisplay < 3)
9242 return MODE_V_ILLEGAL;
9246 * Cantiga+ cannot handle modes with a hsync front porch of 0.
9247 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
9249 if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
9250 mode->hsync_start == mode->hdisplay)
9251 return MODE_H_ILLEGAL;
/*
 * Reject modes larger than the maximum plane size on gen9+ so we never
 * advertise a mode that can't be covered by one fullscreen plane.
 * With a bigjoiner the maximum plane width doubles (5120 << bigjoiner).
 */
9256 enum drm_mode_status
9257 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9258 const struct drm_display_mode *mode,
9261 int plane_width_max, plane_height_max;
9264 * intel_mode_valid() should be
9265 * sufficient on older platforms.
9267 if (DISPLAY_VER(dev_priv) < 9)
9271 * Most people will probably want a fullscreen
9272 * plane so let's not advertize modes that are
9275 if (DISPLAY_VER(dev_priv) >= 11) {
9276 plane_width_max = 5120 << bigjoiner;
9277 plane_height_max = 4320;
9279 plane_width_max = 5120;
9280 plane_height_max = 4096;
9283 if (mode->hdisplay > plane_width_max)
9284 return MODE_H_ILLEGAL;
9286 if (mode->vdisplay > plane_height_max)
9287 return MODE_V_ILLEGAL;
/* DRM mode-config vtable: framebuffer creation, mode validation and the
 * atomic check/commit/state entry points for the whole device. */
9292 static const struct drm_mode_config_funcs intel_mode_funcs = {
9293 .fb_create = intel_user_framebuffer_create,
9294 .get_format_info = intel_fb_get_format_info,
9295 .output_poll_changed = intel_fbdev_output_poll_changed,
9296 .mode_valid = intel_mode_valid,
9297 .atomic_check = intel_atomic_check,
9298 .atomic_commit = intel_atomic_commit,
9299 .atomic_state_alloc = intel_atomic_state_alloc,
9300 .atomic_state_clear = intel_atomic_state_clear,
9301 .atomic_state_free = intel_atomic_state_free,
/* Display hooks for display ver >= 9 (SKL+): HSW-style pipe config and
 * CRTC enable/disable, but SKL-specific modeset-enable ordering and
 * initial plane readout. */
9304 static const struct drm_i915_display_funcs skl_display_funcs = {
9305 .get_pipe_config = hsw_get_pipe_config,
9306 .crtc_enable = hsw_crtc_enable,
9307 .crtc_disable = hsw_crtc_disable,
9308 .commit_modeset_enables = skl_commit_modeset_enables,
9309 .get_initial_plane_config = skl_get_initial_plane_config,
/* Display hooks for pre-gen9 DDI platforms (HSW/BDW). */
9312 static const struct drm_i915_display_funcs ddi_display_funcs = {
9313 .get_pipe_config = hsw_get_pipe_config,
9314 .crtc_enable = hsw_crtc_enable,
9315 .crtc_disable = hsw_crtc_disable,
9316 .commit_modeset_enables = intel_commit_modeset_enables,
9317 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for PCH-split platforms (ILK/SNB/IVB era). */
9320 static const struct drm_i915_display_funcs pch_split_display_funcs = {
9321 .get_pipe_config = ilk_get_pipe_config,
9322 .crtc_enable = ilk_crtc_enable,
9323 .crtc_disable = ilk_crtc_disable,
9324 .commit_modeset_enables = intel_commit_modeset_enables,
9325 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for Valleyview/Cherryview. */
9328 static const struct drm_i915_display_funcs vlv_display_funcs = {
9329 .get_pipe_config = i9xx_get_pipe_config,
9330 .crtc_enable = valleyview_crtc_enable,
9331 .crtc_disable = i9xx_crtc_disable,
9332 .commit_modeset_enables = intel_commit_modeset_enables,
9333 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Fallback display hooks for the remaining legacy i9xx platforms. */
9336 static const struct drm_i915_display_funcs i9xx_display_funcs = {
9337 .get_pipe_config = i9xx_get_pipe_config,
9338 .crtc_enable = i9xx_crtc_enable,
9339 .crtc_disable = i9xx_crtc_disable,
9340 .commit_modeset_enables = intel_commit_modeset_enables,
9341 .get_initial_plane_config = i9xx_get_initial_plane_config,
9345 * intel_init_display_hooks - initialize the display modesetting hooks
9346 * @dev_priv: device private
9348 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
9350 if (!HAS_DISPLAY(dev_priv))
9353 intel_init_cdclk_hooks(dev_priv);
9354 intel_audio_hooks_init(dev_priv);
9356 intel_dpll_init_clock_hook(dev_priv);
/* Select the display-funcs vtable, most capable platform check first. */
9358 if (DISPLAY_VER(dev_priv) >= 9) {
9359 dev_priv->display = &skl_display_funcs;
9360 } else if (HAS_DDI(dev_priv)) {
9361 dev_priv->display = &ddi_display_funcs;
9362 } else if (HAS_PCH_SPLIT(dev_priv)) {
9363 dev_priv->display = &pch_split_display_funcs;
9364 } else if (IS_CHERRYVIEW(dev_priv) ||
9365 IS_VALLEYVIEW(dev_priv)) {
9366 dev_priv->display = &vlv_display_funcs;
9368 dev_priv->display = &i9xx_display_funcs;
9371 intel_fdi_init_hook(dev_priv);
/*
 * Read the current cdclk configuration from hardware and seed the
 * software cdclk state (logical == actual == hw) with it.
 */
9374 void intel_modeset_init_hw(struct drm_i915_private *i915)
9376 struct intel_cdclk_state *cdclk_state;
9378 if (!HAS_DISPLAY(i915))
9381 cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
9383 intel_update_cdclk(i915);
9384 intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
9385 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
/*
 * Pull every CRTC and every plane into @state so the subsequent atomic
 * check recomputes watermarks for the whole device. Active CRTCs keep
 * their 'inherited' flag so the check doesn't escalate to a full modeset.
 * Returns 0 or a negative errno from state acquisition.
 */
9388 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
9390 struct drm_plane *plane;
9391 struct intel_crtc *crtc;
9393 for_each_intel_crtc(state->dev, crtc) {
9394 struct intel_crtc_state *crtc_state;
9396 crtc_state = intel_atomic_get_crtc_state(state, crtc);
9397 if (IS_ERR(crtc_state))
9398 return PTR_ERR(crtc_state);
9400 if (crtc_state->hw.active) {
9402 * Preserve the inherited flag to avoid
9403 * taking the full modeset path.
9405 crtc_state->inherited = true;
9409 drm_for_each_plane(plane, state->dev) {
9410 struct drm_plane_state *plane_state;
9412 plane_state = drm_atomic_get_plane_state(state, plane);
9413 if (IS_ERR(plane_state))
9414 return PTR_ERR(plane_state);
9421 * Calculate what we think the watermarks should be for the state we've read
9422 * out of the hardware and then immediately program those watermarks so that
9423 * we ensure the hardware settings match our internal state.
9425 * We can calculate what we think WM's should be by creating a duplicate of the
9426 * current state (which was constructed during hardware readout) and running it
9427 * through the atomic check code to calculate new watermark values in the
9430 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
9432 struct drm_atomic_state *state;
9433 struct intel_atomic_state *intel_state;
9434 struct intel_crtc *crtc;
9435 struct intel_crtc_state *crtc_state;
9436 struct drm_modeset_acquire_ctx ctx;
9440 /* Only supported on platforms that use atomic watermark design */
9441 if (!dev_priv->wm_disp->optimize_watermarks)
9444 state = drm_atomic_state_alloc(&dev_priv->drm);
9445 if (drm_WARN_ON(&dev_priv->drm, !state))
9448 intel_state = to_intel_atomic_state(state);
9450 drm_modeset_acquire_init(&ctx, 0);
9453 state->acquire_ctx = &ctx;
9456 * Hardware readout is the only time we don't want to calculate
9457 * intermediate watermarks (since we don't trust the current
9460 if (!HAS_GMCH(dev_priv))
9461 intel_state->skip_intermediate_wm = true;
9463 ret = sanitize_watermarks_add_affected(state);
9467 ret = intel_atomic_check(&dev_priv->drm, state);
9471 /* Write calculated watermark values back */
9472 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
9473 crtc_state->wm.need_postvbl_update = true;
9474 intel_optimize_watermarks(intel_state, crtc);
/* Mirror the computed WMs into the CRTC's committed state. */
9476 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
/* -EDEADLK: drop everything and retry with the backed-off locks. */
9480 if (ret == -EDEADLK) {
9481 drm_atomic_state_clear(state);
9482 drm_modeset_backoff(&ctx);
9487 * If we fail here, it means that the hardware appears to be
9488 * programmed in a way that shouldn't be possible, given our
9489 * understanding of watermark requirements. This might mean a
9490 * mistake in the hardware readout code or a mistake in the
9491 * watermark calculations for a given platform. Raise a WARN
9492 * so that this is noticeable.
9494 * If this actually happens, we'll have to just leave the
9495 * BIOS-programmed watermarks untouched and hope for the best.
9497 drm_WARN(&dev_priv->drm, ret,
9498 "Could not determine valid watermarks for inherited state\n");
9500 drm_atomic_state_put(state);
9502 drm_modeset_drop_locks(&ctx);
9503 drm_modeset_acquire_fini(&ctx);
/*
 * Perform one atomic commit of the state read out from hardware at probe,
 * so all active planes recompute their derived state. Keeps the
 * 'inherited' flag to avoid forcing a full modeset; encoders may demand
 * one via initial_fastset_check(). Returns 0 or a negative errno.
 */
9506 static int intel_initial_commit(struct drm_device *dev)
9508 struct drm_atomic_state *state = NULL;
9509 struct drm_modeset_acquire_ctx ctx;
9510 struct intel_crtc *crtc;
9513 state = drm_atomic_state_alloc(dev);
9517 drm_modeset_acquire_init(&ctx, 0);
9520 state->acquire_ctx = &ctx;
9522 for_each_intel_crtc(dev, crtc) {
9523 struct intel_crtc_state *crtc_state =
9524 intel_atomic_get_crtc_state(state, crtc);
9526 if (IS_ERR(crtc_state)) {
9527 ret = PTR_ERR(crtc_state);
9531 if (crtc_state->hw.active) {
9532 struct intel_encoder *encoder;
9535 * We've not yet detected sink capabilities
9536 * (audio,infoframes,etc.) and thus we don't want to
9537 * force a full state recomputation yet. We want that to
9538 * happen only for the first real commit from userspace.
9539 * So preserve the inherited flag for the time being.
9541 crtc_state->inherited = true;
9543 ret = drm_atomic_add_affected_planes(state, &crtc->base);
9548 * FIXME hack to force a LUT update to avoid the
9549 * plane update forcing the pipe gamma on without
9550 * having a proper LUT loaded. Remove once we
9551 * have readout for pipe gamma enable.
9553 crtc_state->uapi.color_mgmt_changed = true;
/* Let each encoder veto the fastset if its state can't be reused. */
9555 for_each_intel_encoder_mask(dev, encoder,
9556 crtc_state->uapi.encoder_mask) {
9557 if (encoder->initial_fastset_check &&
9558 !encoder->initial_fastset_check(encoder, crtc_state)) {
9559 ret = drm_atomic_add_affected_connectors(state,
9568 ret = drm_atomic_commit(state);
/* -EDEADLK: clear the state and retry after backing off the locks. */
9571 if (ret == -EDEADLK) {
9572 drm_atomic_state_clear(state);
9573 drm_modeset_backoff(&ctx);
9577 drm_atomic_state_put(state);
9579 drm_modeset_drop_locks(&ctx);
9580 drm_modeset_acquire_fini(&ctx);
/*
 * Initialize the DRM mode_config for the device: limits (fb and cursor
 * sizes per display generation), function tables and async flip support.
 */
9585 static void intel_mode_config_init(struct drm_i915_private *i915)
9587 struct drm_mode_config *mode_config = &i915->drm.mode_config;
9589 drm_mode_config_init(&i915->drm);
9590 INIT_LIST_HEAD(&i915->global_obj_list);
9592 mode_config->min_width = 0;
9593 mode_config->min_height = 0;
9595 mode_config->preferred_depth = 24;
9596 mode_config->prefer_shadow = 1;
9598 mode_config->funcs = &intel_mode_funcs;
9600 mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
9603 * Maximum framebuffer dimensions, chosen to match
9604 * the maximum render engine surface size on gen4+.
9606 if (DISPLAY_VER(i915) >= 7) {
9607 mode_config->max_width = 16384;
9608 mode_config->max_height = 16384;
9609 } else if (DISPLAY_VER(i915) >= 4) {
9610 mode_config->max_width = 8192;
9611 mode_config->max_height = 8192;
9612 } else if (DISPLAY_VER(i915) == 3) {
9613 mode_config->max_width = 4096;
9614 mode_config->max_height = 4096;
9616 mode_config->max_width = 2048;
9617 mode_config->max_height = 2048;
/* Cursor size limits vary by platform family. */
9620 if (IS_I845G(i915) || IS_I865G(i915)) {
9621 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
9622 mode_config->cursor_height = 1023;
9623 } else if (IS_I830(i915) || IS_I85X(i915) ||
9624 IS_I915G(i915) || IS_I915GM(i915)) {
9625 mode_config->cursor_width = 64;
9626 mode_config->cursor_height = 64;
9628 mode_config->cursor_width = 256;
9629 mode_config->cursor_height = 256;
/* Tear down what intel_mode_config_init() set up (reverse order). */
9633 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
9635 intel_atomic_global_obj_cleanup(i915);
9636 drm_mode_config_cleanup(&i915->drm);
9639 /* part #1: call before irq install */
/*
 * First stage of modeset init, run before interrupts are installed:
 * vblank, VBT, VGA, power domains, DMC firmware, workqueues, mode
 * config and the cdclk/dbuf/bw global state objects.
 * Returns 0 or a negative errno; on failure unwinds via the shared
 * cleanup label below.
 */
9640 int intel_modeset_init_noirq(struct drm_i915_private *i915)
9644 if (i915_inject_probe_failure(i915))
9647 if (HAS_DISPLAY(i915)) {
9648 ret = drm_vblank_init(&i915->drm,
9649 INTEL_NUM_PIPES(i915));
9654 intel_bios_init(i915);
9656 ret = intel_vga_register(i915);
9660 /* FIXME: completely on the wrong abstraction layer */
9661 intel_power_domains_init_hw(i915, false);
9663 if (!HAS_DISPLAY(i915))
9666 intel_dmc_ucode_init(i915);
/* Ordered wq for modesets; high-prio unbound wq for page flips. */
9668 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
9669 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
9670 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
9672 i915->window2_delay = 0; /* No DSB so no window2 delay */
9674 intel_mode_config_init(i915);
9676 ret = intel_cdclk_init(i915);
9678 goto cleanup_vga_client_pw_domain_dmc;
9680 ret = intel_dbuf_init(i915);
9682 goto cleanup_vga_client_pw_domain_dmc;
9684 ret = intel_bw_init(i915);
9686 goto cleanup_vga_client_pw_domain_dmc;
9688 init_llist_head(&i915->atomic_helper.free_list);
9689 INIT_WORK(&i915->atomic_helper.free_work,
9690 intel_atomic_helper_free_state_worker);
9692 intel_init_quirks(i915);
9694 intel_fbc_init(i915);
/* Error unwind: undo DMC, power domains, VGA and VBT in reverse order. */
9698 cleanup_vga_client_pw_domain_dmc:
9699 intel_dmc_ucode_fini(i915);
9700 intel_power_domains_driver_remove(i915);
9701 intel_vga_unregister(i915);
9703 intel_bios_driver_remove(i915);
9708 /* part #2: call after irq install, but before gem init */
/*
 * Second stage of modeset init: PM/PPS/GMBUS setup, CRTC creation for
 * every pipe, DPLL/clock setup, output probing, hardware state readout
 * and watermark sanitization. Returns 0 or a negative errno.
 */
9709 int intel_modeset_init_nogem(struct drm_i915_private *i915)
9711 struct drm_device *dev = &i915->drm;
9713 struct intel_crtc *crtc;
9716 if (!HAS_DISPLAY(i915))
9719 intel_init_pm(i915);
9721 intel_panel_sanitize_ssc(i915);
9723 intel_pps_setup(i915);
9725 intel_gmbus_setup(i915);
9727 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
9728 INTEL_NUM_PIPES(i915),
9729 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
/* One CRTC per hardware pipe; unwind mode config on failure. */
9731 for_each_pipe(i915, pipe) {
9732 ret = intel_crtc_init(i915, pipe);
9734 intel_mode_config_cleanup(i915);
9739 intel_plane_possible_crtcs_init(i915);
9740 intel_shared_dpll_init(dev);
9741 intel_fdi_pll_freq_update(i915);
9743 intel_update_czclk(i915);
9744 intel_modeset_init_hw(i915);
9745 intel_dpll_update_ref_clks(i915);
9747 intel_hdcp_component_init(i915);
9749 if (i915->max_cdclk_freq == 0)
9750 intel_update_max_cdclk(i915);
9753 * If the platform has HTI, we need to find out whether it has reserved
9754 * any display resources before we create our display outputs.
9756 if (INTEL_INFO(i915)->display.has_hti)
9757 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
9759 /* Just disable it once at startup */
9760 intel_vga_disable(i915);
9761 intel_setup_outputs(i915);
/* Read the BIOS-programmed hardware state under the modeset locks. */
9763 drm_modeset_lock_all(dev);
9764 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
9765 intel_acpi_assign_connector_fwnodes(i915);
9766 drm_modeset_unlock_all(dev);
9768 for_each_intel_crtc(dev, crtc) {
9769 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
9771 intel_crtc_initial_plane_config(crtc);
9775 * Make sure hardware watermarks really match the state we read out.
9776 * Note that we need to do this after reconstructing the BIOS fb's
9777 * since the watermark calculation done here will use pstate->fb.
9779 if (!HAS_GMCH(i915))
9780 sanitize_watermarks(i915);
9785 /* part #3: call after gem init */
/*
 * Final stage of modeset init: initial commit of the inherited state,
 * overlay and fbdev setup, then hotplug and IPC. Returns 0 or a
 * negative errno.
 */
9786 int intel_modeset_init(struct drm_i915_private *i915)
9790 if (!HAS_DISPLAY(i915))
9794 * Force all active planes to recompute their states. So that on
9795 * mode_setcrtc after probe, all the intel_plane_state variables
9796 * are already calculated and there is no assert_plane warnings
9799 ret = intel_initial_commit(&i915->drm);
/* Initial commit failure is logged but not fatal to driver load. */
9801 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
9803 intel_overlay_setup(i915);
9805 ret = intel_fbdev_init(&i915->drm);
9809 /* Only enable hotplug handling once the fbdev is fully set up. */
9810 intel_hpd_init(i915);
9811 intel_hpd_poll_disable(i915);
9813 intel_init_ipc(i915);
/*
 * Force-enable @pipe on i830 with a fixed 640x480@60 timing (quirk:
 * the hardware apparently needs the pipe running). Programs the DPLL
 * dividers, full pipe timings and PIPECONF directly, then waits for
 * the scanline to start moving.
 */
9818 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9820 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9821 /* 640x480@60Hz, ~25175 kHz */
9822 struct dpll clock = {
/* Sanity-check that the fixed divider set really yields ~25.2 MHz. */
9832 drm_WARN_ON(&dev_priv->drm,
9833 i9xx_calc_dpll_params(48000, &clock) != 25154);
9835 drm_dbg_kms(&dev_priv->drm,
9836 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
9837 pipe_name(pipe), clock.vco, clock.dot);
9839 fp = i9xx_dpll_compute_fp(&clock);
9840 dpll = DPLL_DVO_2X_MODE |
9842 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
9843 PLL_P2_DIVIDE_BY_4 |
9844 PLL_REF_INPUT_DREFCLK |
9847 intel_de_write(dev_priv, FP0(pipe), fp);
9848 intel_de_write(dev_priv, FP1(pipe), fp);
/* Hard-coded 640x480@60 timings (values are reg format: N - 1). */
9850 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
9851 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
9852 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
9853 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
9854 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
9855 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
9856 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
9859 * Apparently we need to have VGA mode enabled prior to changing
9860 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
9861 * dividers, even though the register value does change.
9863 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
9864 intel_de_write(dev_priv, DPLL(pipe), dpll);
9866 /* Wait for the clocks to stabilize. */
9867 intel_de_posting_read(dev_priv, DPLL(pipe));
9870 /* The pixel multiplier can only be updated once the
9871 * DPLL is enabled and the clocks are stable.
9873 * So write it again.
9875 intel_de_write(dev_priv, DPLL(pipe), dpll);
9877 /* We do this three times for luck */
9878 for (i = 0; i < 3 ; i++) {
9879 intel_de_write(dev_priv, DPLL(pipe), dpll);
9880 intel_de_posting_read(dev_priv, DPLL(pipe));
9881 udelay(150); /* wait for warmup */
9884 intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
9885 intel_de_posting_read(dev_priv, PIPECONF(pipe));
9887 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * Counterpart to i830_enable_pipe(): turn the force-quirk pipe back off.
 * Warns if any plane or cursor is still enabled, then disables PIPECONF,
 * waits for the scanline to stop and parks the DPLL in VGA mode.
 */
9890 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9892 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9894 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
/* Nothing should still be scanning out of this pipe at this point. */
9897 drm_WARN_ON(&dev_priv->drm,
9898 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
9899 drm_WARN_ON(&dev_priv->drm,
9900 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
9901 drm_WARN_ON(&dev_priv->drm,
9902 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
9903 drm_WARN_ON(&dev_priv->drm,
9904 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
9905 drm_WARN_ON(&dev_priv->drm,
9906 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
9908 intel_de_write(dev_priv, PIPECONF(pipe), 0);
9909 intel_de_posting_read(dev_priv, PIPECONF(pipe));
9911 intel_wait_for_pipe_scanline_stopped(crtc);
9913 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
9914 intel_de_posting_read(dev_priv, DPLL(pipe));
/*
 * On pre-gen4 hardware the BIOS may leave a primary plane attached to
 * the wrong pipe; detect that via the plane's hw state and disable any
 * such mis-attached plane. No-op on gen4+.
 */
9918 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
9920 struct intel_crtc *crtc;
9922 if (DISPLAY_VER(dev_priv) >= 4)
9925 for_each_intel_crtc(&dev_priv->drm, crtc) {
9926 struct intel_plane *plane =
9927 to_intel_plane(crtc->base.primary);
9928 struct intel_crtc *plane_crtc;
9931 if (!plane->get_hw_state(plane, &pipe))
/* Plane is active on its own crtc's pipe: nothing to fix. */
9934 if (pipe == crtc->pipe)
9937 drm_dbg_kms(&dev_priv->drm,
9938 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
9939 plane->base.base.id, plane->base.name);
9941 plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
9942 intel_plane_disable_noatomic(plane_crtc, plane);
/* Whether at least one encoder is currently attached to @crtc. */
9946 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
9948 struct drm_device *dev = crtc->base.dev;
9949 struct intel_encoder *encoder;
9951 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
/* Find a connector currently using @encoder, or NULL if none. */
9957 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
9959 struct drm_device *dev = encoder->base.dev;
9960 struct intel_connector *connector;
9962 for_each_connector_on_encoder(dev, &encoder->base, connector)
/*
 * Bring a CRTC whose state was read out from the BIOS into a state the
 * driver can reason about: disable all non-primary planes and BIOS
 * background color, turn off pipes with no active encoders, and mark
 * FIFO underrun reporting as disabled for bookkeeping.
 */
9968 static void intel_sanitize_crtc(struct intel_crtc *crtc,
9969 struct drm_modeset_acquire_ctx *ctx)
9971 struct drm_device *dev = crtc->base.dev;
9972 struct drm_i915_private *dev_priv = to_i915(dev);
9973 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
9975 if (crtc_state->hw.active) {
9976 struct intel_plane *plane;
9978 /* Disable everything but the primary plane */
9979 for_each_intel_plane_on_crtc(dev, crtc, plane) {
9980 const struct intel_plane_state *plane_state =
9981 to_intel_plane_state(plane->base.state);
9983 if (plane_state->uapi.visible &&
9984 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
9985 intel_plane_disable_noatomic(crtc, plane);
9988 /* Disable any background color/etc. set by the BIOS */
9989 intel_color_commit_noarm(crtc_state);
9990 intel_color_commit_arm(crtc_state);
9993 /* Adjust the state of the output pipe according to whether we
9994 * have active connectors/encoders. */
9995 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
9996 !intel_crtc_is_bigjoiner_slave(crtc_state))
9997 intel_crtc_disable_noatomic(crtc, ctx)
9999 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
10001 * We start out with underrun reporting disabled to avoid races.
10002 * For correct bookkeeping mark this on active crtcs.
10004 * Also on gmch platforms we dont have any hardware bits to
10005 * disable the underrun reporting. Which means we need to start
10006 * out with underrun reporting disabled also on inactive pipes,
10007 * since otherwise we'll complain about the garbage we read when
10008 * e.g. coming up after runtime pm.
10010 * No protection against concurrent access is required - at
10011 * worst a fifo underrun happens which also sets this to false.
10013 crtc->cpu_fifo_underrun_disabled = true;
10015 * We track the PCH trancoder underrun reporting state
10016 * within the crtc. With crtc for pipe A housing the underrun
10017 * reporting state for PCH transcoder A, crtc for pipe B housing
10018 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
10019 * and marking underrun reporting as disabled for the non-existing
10020 * PCH transcoders B and C would prevent enabling the south
10021 * error interrupt (see cpt_can_enable_serr_int()).
10023 if (intel_has_pch_trancoder(dev_priv, crtc->pipe))
10024 crtc->pch_fifo_underrun_disabled = true;
10028 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
10030 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10033 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
10034 * the hardware when a high res displays plugged in. DPLL P
10035 * divider is zero, and the pipe timings are bonkers. We'll
10036 * try to disable everything in that case.
10038 * FIXME would be nice to be able to sanitize this state
10039 * without several WARNs, but for now let's take the easy
10042 return IS_SANDYBRIDGE(dev_priv) &&
10043 crtc_state->hw.active &&
10044 crtc_state->shared_dpll &&
10045 crtc_state->port_clock == 0;
/*
 * intel_sanitize_encoder() - fix up encoder state inherited from the
 * BIOS or from resume register restoring.
 *
 * If a connector claims this encoder but the encoder has no active pipe
 * behind it (or the pipe has a bogus SNB DPLL config), run the encoder's
 * disable hooks manually and clamp the connector state to off. Finally
 * notify opregion of the sanitized state and, on DDI platforms, fix up
 * the encoder's PLL mapping.
 */
10048 static void intel_sanitize_encoder(struct intel_encoder *encoder)
10050 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10051 struct intel_connector *connector;
10052 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
10053 struct intel_crtc_state *crtc_state = crtc ?
10054 to_intel_crtc_state(crtc->base.state) : NULL;
10056 /* We need to check both for a crtc link (meaning that the
10057 * encoder is active and trying to read from a pipe) and the
10058 * pipe itself being active. */
10059 bool has_active_crtc = crtc_state &&
10060 crtc_state->hw.active;
10062 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
10063 drm_dbg_kms(&dev_priv->drm,
10064 "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
10065 pipe_name(crtc->pipe));
10066 has_active_crtc = false;
10069 connector = intel_encoder_find_connector(encoder);
10070 if (connector && !has_active_crtc) {
10071 drm_dbg_kms(&dev_priv->drm,
10072 "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
10073 encoder->base.base.id,
10074 encoder->base.name);
10076 /* Connector is active, but has no active pipe. This is
10077 * fallout from our resume register restoring. Disable
10078 * the encoder manually again. */
10080 struct drm_encoder *best_encoder;
10082 drm_dbg_kms(&dev_priv->drm,
10083 "[ENCODER:%d:%s] manually disabled\n",
10084 encoder->base.base.id,
10085 encoder->base.name);
10087 /* avoid oopsing in case the hooks consult best_encoder */
10088 best_encoder = connector->base.state->best_encoder;
10089 connector->base.state->best_encoder = &encoder->base;
10091 /* FIXME NULL atomic state passed! */
10092 if (encoder->disable)
10093 encoder->disable(NULL, encoder, crtc_state,
10094 connector->base.state);
10095 if (encoder->post_disable)
10096 encoder->post_disable(NULL, encoder, crtc_state,
10097 connector->base.state);
10099 connector->base.state->best_encoder = best_encoder;
10101 encoder->base.crtc = NULL;
10103 /* Inconsistent output/port/pipe state happens presumably due to
10104 * a bug in one of the get_hw_state functions. Or someplace else
10105 * in our code, like the register restore mess on resume. Clamp
10106 * things to off as a safer default. */
10108 connector->base.dpms = DRM_MODE_DPMS_OFF;
10109 connector->base.encoder = NULL;
10112 /* notify opregion of the sanitized encoder state */
10113 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
10115 if (HAS_DDI(dev_priv))
10116 intel_ddi_sanitize_encoder_pll_mapping(encoder);
10119 /* FIXME read out full plane state for all planes */
/*
 * readout_plane_state() - read hw visibility and pipe assignment for
 * every plane, record it in the plane/crtc software state, then fix up
 * each crtc's plane bitmasks to match what was read out.
 */
10120 static void readout_plane_state(struct drm_i915_private *dev_priv)
10122 struct intel_plane *plane;
10123 struct intel_crtc *crtc;
10125 for_each_intel_plane(&dev_priv->drm, plane) {
10126 struct intel_plane_state *plane_state =
10127 to_intel_plane_state(plane->base.state);
10128 struct intel_crtc_state *crtc_state;
/* Default pipe in case the hook leaves it untouched. */
10129 enum pipe pipe = PIPE_A;
10132 visible = plane->get_hw_state(plane, &pipe);
10134 crtc = intel_crtc_for_pipe(dev_priv, pipe);
10135 crtc_state = to_intel_crtc_state(crtc->base.state);
10137 intel_set_plane_visible(crtc_state, plane_state, visible);
10139 drm_dbg_kms(&dev_priv->drm,
10140 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
10141 plane->base.base.id, plane->base.name,
10142 str_enabled_disabled(visible), pipe_name(pipe));
/* Recompute the active/planes bitmasks from the readout above. */
10145 for_each_intel_crtc(&dev_priv->drm, crtc) {
10146 struct intel_crtc_state *crtc_state =
10147 to_intel_crtc_state(crtc->base.state);
10149 fixup_plane_bitmasks(crtc_state);
/*
 * intel_modeset_readout_hw_state() - populate the software state from
 * the current hardware state.
 *
 * Phases, in order:
 *  1. reset each crtc state and read its pipe config from hw
 *  2. read plane state (readout_plane_state)
 *  3. read encoder -> pipe links and their configs (incl. bigjoiner
 *     slave pipes)
 *  4. read DPLL state
 *  5. read connector state and link connectors/encoders into the crtc
 *     uapi masks
 *  6. derive cdclk/voltage/bandwidth bookkeeping per crtc and mark the
 *     states as inherited so the next commit recomputes everything
 */
10153 static void intel_modeset_readout_hw_state(struct drm_device *dev)
10155 struct drm_i915_private *dev_priv = to_i915(dev);
10156 struct intel_cdclk_state *cdclk_state =
10157 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
10158 struct intel_dbuf_state *dbuf_state =
10159 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
10161 struct intel_crtc *crtc;
10162 struct intel_encoder *encoder;
10163 struct intel_connector *connector;
10164 struct drm_connector_list_iter conn_iter;
10165 u8 active_pipes = 0;
/* Phase 1: per-crtc state reset + pipe config readout. */
10167 for_each_intel_crtc(dev, crtc) {
10168 struct intel_crtc_state *crtc_state =
10169 to_intel_crtc_state(crtc->base.state);
10171 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
10172 intel_crtc_free_hw_state(crtc_state);
10173 intel_crtc_state_reset(crtc_state, crtc);
10175 intel_crtc_get_pipe_config(crtc_state);
10177 crtc_state->hw.enable = crtc_state->hw.active;
10179 crtc->base.enabled = crtc_state->hw.enable;
10180 crtc->active = crtc_state->hw.active;
10182 if (crtc_state->hw.active)
10183 active_pipes |= BIT(crtc->pipe);
10185 drm_dbg_kms(&dev_priv->drm,
10186 "[CRTC:%d:%s] hw state readout: %s\n",
10187 crtc->base.base.id, crtc->base.name,
10188 str_enabled_disabled(crtc_state->hw.active));
10191 cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
10193 readout_plane_state(dev_priv);
/* Phase 3: encoder -> pipe links and encoder configs. */
10195 for_each_intel_encoder(dev, encoder) {
10196 struct intel_crtc_state *crtc_state = NULL;
10200 if (encoder->get_hw_state(encoder, &pipe)) {
10201 crtc = intel_crtc_for_pipe(dev_priv, pipe);
10202 crtc_state = to_intel_crtc_state(crtc->base.state);
10204 encoder->base.crtc = &crtc->base;
10205 intel_encoder_get_config(encoder, crtc_state);
10207 /* read out to slave crtc as well for bigjoiner */
10208 if (crtc_state->bigjoiner_pipes) {
10209 struct intel_crtc *slave_crtc;
10211 /* encoder should read be linked to bigjoiner master */
10212 WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
10214 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
10215 intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
10216 struct intel_crtc_state *slave_crtc_state;
10218 slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
10219 intel_encoder_get_config(encoder, slave_crtc_state);
10223 encoder->base.crtc = NULL;
10226 if (encoder->sync_state)
10227 encoder->sync_state(encoder, crtc_state);
10229 drm_dbg_kms(&dev_priv->drm,
10230 "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10231 encoder->base.base.id, encoder->base.name,
10232 str_enabled_disabled(encoder->base.crtc),
10236 intel_dpll_readout_hw_state(dev_priv);
/* Phase 5: connector state, linked via the attached encoder. */
10238 drm_connector_list_iter_begin(dev, &conn_iter);
10239 for_each_intel_connector_iter(connector, &conn_iter) {
10240 if (connector->get_hw_state(connector)) {
10241 struct intel_crtc_state *crtc_state;
10242 struct intel_crtc *crtc;
10244 connector->base.dpms = DRM_MODE_DPMS_ON;
10246 encoder = intel_attached_encoder(connector);
10247 connector->base.encoder = &encoder->base;
10249 crtc = to_intel_crtc(encoder->base.crtc);
10250 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
10252 if (crtc_state && crtc_state->hw.active) {
10254 * This has to be done during hardware readout
10255 * because anything calling .crtc_disable may
10256 * rely on the connector_mask being accurate.
10258 crtc_state->uapi.connector_mask |=
10259 drm_connector_mask(&connector->base);
10260 crtc_state->uapi.encoder_mask |=
10261 drm_encoder_mask(&encoder->base);
10264 connector->base.dpms = DRM_MODE_DPMS_OFF;
10265 connector->base.encoder = NULL;
10267 drm_dbg_kms(&dev_priv->drm,
10268 "[CONNECTOR:%d:%s] hw state readout: %s\n",
10269 connector->base.base.id, connector->base.name,
10270 str_enabled_disabled(connector->base.encoder));
10272 drm_connector_list_iter_end(&conn_iter);
/* Phase 6: derived per-crtc bookkeeping (cdclk, voltage, bandwidth). */
10274 for_each_intel_crtc(dev, crtc) {
10275 struct intel_bw_state *bw_state =
10276 to_intel_bw_state(dev_priv->bw_obj.state);
10277 struct intel_crtc_state *crtc_state =
10278 to_intel_crtc_state(crtc->base.state);
10279 struct intel_plane *plane;
10282 if (crtc_state->hw.active) {
10284 * The initial mode needs to be set in order to keep
10285 * the atomic core happy. It wants a valid mode if the
10286 * crtc's enabled, so we do the above call.
10288 * But we don't set all the derived state fully, hence
10289 * set a flag to indicate that a full recalculation is
10290 * needed on the next commit.
10292 crtc_state->inherited = true;
10294 intel_crtc_update_active_timings(crtc_state);
10296 intel_crtc_copy_hw_to_uapi_state(crtc_state);
10299 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
10300 const struct intel_plane_state *plane_state =
10301 to_intel_plane_state(plane->base.state);
10304 * FIXME don't have the fb yet, so can't
10305 * use intel_plane_data_rate() :(
10307 if (plane_state->uapi.visible)
10308 crtc_state->data_rate[plane->id] =
10309 4 * crtc_state->pixel_rate;
10311 * FIXME don't have the fb yet, so can't
10312 * use plane->min_cdclk() :(
10314 if (plane_state->uapi.visible && plane->min_cdclk) {
10315 if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
10316 crtc_state->min_cdclk[plane->id] =
10317 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
10319 crtc_state->min_cdclk[plane->id] =
10320 crtc_state->pixel_rate;
10322 drm_dbg_kms(&dev_priv->drm,
10323 "[PLANE:%d:%s] min_cdclk %d kHz\n",
10324 plane->base.base.id, plane->base.name,
10325 crtc_state->min_cdclk[plane->id]);
10328 if (crtc_state->hw.active) {
10329 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
10330 if (drm_WARN_ON(dev, min_cdclk < 0))
10334 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
10335 cdclk_state->min_voltage_level[crtc->pipe] =
10336 crtc_state->min_voltage_level;
10338 intel_bw_crtc_update(bw_state, crtc_state);
10340 intel_pipe_config_sanity_check(dev_priv, crtc_state);
10345 get_encoder_power_domains(struct drm_i915_private *dev_priv)
10347 struct intel_encoder *encoder;
10349 for_each_intel_encoder(&dev_priv->drm, encoder) {
10350 struct intel_crtc_state *crtc_state;
10352 if (!encoder->get_power_domains)
10356 * MST-primary and inactive encoders don't have a crtc state
10357 * and neither of these require any power domain references.
10359 if (!encoder->base.crtc)
10362 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10363 encoder->get_power_domains(encoder, crtc_state);
/*
 * intel_early_display_was() - apply early display workarounds that must
 * be in place before hw state readout/sanitization:
 *  - Wa #1185 / Wa_14010480278: disable DARBF clock gating (display 10-12)
 *  - WaRsPkgCStateDisplayPMReq on HSW (must precede plane disable)
 *  - Wa #1142 arbiter fill spare bits on KBL/CFL/CML
 */
10367 static void intel_early_display_was(struct drm_i915_private *dev_priv)
10370 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
10371 * Also known as Wa_14010480278.
10373 if (IS_DISPLAY_VER(dev_priv, 10, 12))
10374 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
10375 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
10377 if (IS_HASWELL(dev_priv)) {
10379 * WaRsPkgCStateDisplayPMReq:hsw
10380 * System hang if this isn't done before disabling all planes!
10382 intel_de_write(dev_priv, CHICKEN_PAR1_1,
10383 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
10386 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
10387 /* Display WA #1142:kbl,cfl,cml */
10388 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
10389 KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
10390 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
10391 KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
10392 KBL_ARB_FILL_SPARE_14);
/*
 * intel_modeset_setup_hw_state() - driver entry point for taking over
 * the hardware state: apply early workarounds, read out the full hw
 * state, then sanitize planes, encoders, crtcs, DPLLs and watermarks,
 * all under an INIT power domain reference.
 */
10397 /* Scan out the current hw modeset state,
10398 * and sanitizes it to the current state
10401 intel_modeset_setup_hw_state(struct drm_device *dev,
10402 struct drm_modeset_acquire_ctx *ctx)
10404 struct drm_i915_private *dev_priv = to_i915(dev);
10405 struct intel_encoder *encoder;
10406 struct intel_crtc *crtc;
10407 intel_wakeref_t wakeref;
10409 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
10411 intel_early_display_was(dev_priv);
10412 intel_modeset_readout_hw_state(dev);
10414 /* HW state is read out, now we need to sanitize this mess. */
10415 get_encoder_power_domains(dev_priv);
10417 intel_pch_sanitize(dev_priv);
10420 * intel_sanitize_plane_mapping() may need to do vblank
10421 * waits, so we need vblank interrupts restored beforehand.
10423 for_each_intel_crtc(&dev_priv->drm, crtc) {
10424 struct intel_crtc_state *crtc_state =
10425 to_intel_crtc_state(crtc->base.state);
10427 drm_crtc_vblank_reset(&crtc->base);
10429 if (crtc_state->hw.active)
10430 intel_crtc_vblank_on(crtc_state);
10433 intel_fbc_sanitize(dev_priv);
10435 intel_sanitize_plane_mapping(dev_priv);
/* Encoders before crtcs: crtc sanitize may disable orphaned pipes. */
10437 for_each_intel_encoder(dev, encoder)
10438 intel_sanitize_encoder(encoder);
10440 for_each_intel_crtc(&dev_priv->drm, crtc) {
10441 struct intel_crtc_state *crtc_state =
10442 to_intel_crtc_state(crtc->base.state);
10444 intel_sanitize_crtc(crtc, ctx);
10445 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
10448 intel_modeset_update_connector_atomic_state(dev);
10450 intel_dpll_sanitize_state(dev_priv);
/* Watermark readout/sanitize, per platform generation. */
10452 if (IS_G4X(dev_priv)) {
10453 g4x_wm_get_hw_state(dev_priv);
10454 g4x_wm_sanitize(dev_priv);
10455 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
10456 vlv_wm_get_hw_state(dev_priv);
10457 vlv_wm_sanitize(dev_priv);
10458 } else if (DISPLAY_VER(dev_priv) >= 9) {
10459 skl_wm_get_hw_state(dev_priv);
10460 skl_wm_sanitize(dev_priv);
10461 } else if (HAS_PCH_SPLIT(dev_priv)) {
10462 ilk_wm_get_hw_state(dev_priv);
10465 for_each_intel_crtc(dev, crtc) {
10466 struct intel_crtc_state *crtc_state =
10467 to_intel_crtc_state(crtc->base.state);
/* A non-empty put_domains here indicates a readout/sanitize bug. */
10470 put_domains = modeset_get_crtc_power_domains(crtc_state);
10471 if (drm_WARN_ON(dev, put_domains))
10472 modeset_put_crtc_power_domains(crtc, put_domains);
10475 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
10477 intel_power_domains_sanitize_state(dev_priv);
/*
 * intel_display_resume() - restore the atomic state saved at suspend
 * time. Takes all modeset locks (retrying on -EDEADLK via the acquire
 * ctx backoff dance), commits the saved state, re-enables IPC and drops
 * the state reference.
 */
10480 void intel_display_resume(struct drm_device *dev)
10482 struct drm_i915_private *dev_priv = to_i915(dev);
10483 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
10484 struct drm_modeset_acquire_ctx ctx;
10487 if (!HAS_DISPLAY(dev_priv))
/* Consume the saved state so a second resume doesn't reuse it. */
10490 dev_priv->modeset_restore_state = NULL;
10492 state->acquire_ctx = &ctx;
10494 drm_modeset_acquire_init(&ctx, 0);
/* Lock-all / backoff retry loop for the ww acquire context. */
10497 ret = drm_modeset_lock_all_ctx(dev, &ctx);
10498 if (ret != -EDEADLK)
10501 drm_modeset_backoff(&ctx);
10505 ret = __intel_display_resume(dev, state, &ctx);
10507 intel_enable_ipc(dev_priv);
10508 drm_modeset_drop_locks(&ctx);
10509 drm_modeset_acquire_fini(&ctx);
10512 drm_err(&dev_priv->drm,
10513 "Restoring old state failed with %i\n", ret);
10515 drm_atomic_state_put(state);
/*
 * intel_hpd_poll_fini() - synchronously cancel all per-connector work
 * (modeset retry, HDCP check/prop work) that hotplug handling may have
 * queued, so nothing runs after teardown.
 */
10518 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
10520 struct intel_connector *connector;
10521 struct drm_connector_list_iter conn_iter;
10523 /* Kill all the work that may have been queued by hpd. */
10524 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
10525 for_each_intel_connector_iter(connector, &conn_iter) {
10526 if (connector->modeset_retry_work.func)
10527 cancel_work_sync(&connector->modeset_retry_work);
10528 if (connector->hdcp.shim) {
10529 cancel_delayed_work_sync(&connector->hdcp.check_work);
10530 cancel_work_sync(&connector->hdcp.prop_work);
10533 drm_connector_list_iter_end(&conn_iter);
10536 /* part #1: call before irq uninstall */
/*
 * Flush the flip/modeset workqueues and the atomic free work while
 * interrupts are still installed; the free list must be empty by now.
 */
10537 void intel_modeset_driver_remove(struct drm_i915_private *i915)
10539 if (!HAS_DISPLAY(i915))
10542 flush_workqueue(i915->flip_wq);
10543 flush_workqueue(i915->modeset_wq);
10545 flush_work(&i915->atomic_helper.free_work);
10546 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
10549 /* part #2: call after irq uninstall */
/*
 * Tear down the pieces that depend on hpd/poll being quiesced, in a
 * specific order: stop polling, suspend MST, remove fbdev, then clean
 * up HDCP, mode config, overlay, GMBUS, workqueues and FBC.
 */
10550 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
10552 if (!HAS_DISPLAY(i915))
10556 * Due to the hpd irq storm handling the hotplug work can re-arm the
10557 * poll handlers. Hence disable polling after hpd handling is shut down.
10559 intel_hpd_poll_fini(i915);
10562 * MST topology needs to be suspended so we don't have any calls to
10563 * fbdev after it's finalized. MST will be destroyed later as part of
10564 * drm_mode_config_cleanup()
10566 intel_dp_mst_suspend(i915);
10568 /* poll work can call into fbdev, hence clean that up afterwards */
10569 intel_fbdev_fini(i915);
10571 intel_unregister_dsm_handler();
10573 /* flush any delayed tasks or pending work */
10574 flush_scheduled_work();
10576 intel_hdcp_component_fini(i915);
10578 intel_mode_config_cleanup(i915);
10580 intel_overlay_cleanup(i915);
10582 intel_gmbus_teardown(i915);
10584 destroy_workqueue(i915->flip_wq);
10585 destroy_workqueue(i915->modeset_wq);
10587 intel_fbc_cleanup(i915);
10590 /* part #3: call after gem init */
/*
 * Final teardown stage: release DMC firmware, power domains, the VGA
 * client registration and the VBT/BIOS data.
 */
10591 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
10593 intel_dmc_ucode_fini(i915);
10595 intel_power_domains_driver_remove(i915);
10597 intel_vga_unregister(i915);
10599 intel_bios_driver_remove(i915);
10602 bool intel_modeset_probe_defer(struct pci_dev *pdev)
10604 struct drm_privacy_screen *privacy_screen;
10607 * apple-gmux is needed on dual GPU MacBook Pro
10608 * to probe the panel if we're the inactive GPU.
10610 if (vga_switcheroo_client_probe_defer(pdev))
10613 /* If the LCD panel has a privacy-screen, wait for it */
10614 privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
10615 if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
10618 drm_privacy_screen_put(privacy_screen);
/*
 * intel_display_driver_register() - userspace-facing registration:
 * debugfs, opregion/ACPI video, audio, then the async fbdev initial
 * config and connector polling, in that order (see comments below).
 */
10623 void intel_display_driver_register(struct drm_i915_private *i915)
10625 if (!HAS_DISPLAY(i915))
10628 intel_display_debugfs_register(i915);
10630 /* Must be done after probing outputs */
10631 intel_opregion_register(i915);
10632 acpi_video_register();
10634 intel_audio_init(i915);
10637 * Some ports require correctly set-up hpd registers for
10638 * detection to work properly (leading to ghost connected
10639 * connector status), e.g. VGA on gm45. Hence we can only set
10640 * up the initial fbdev config after hpd irqs are fully
10641 * enabled. We do it last so that the async config cannot run
10642 * before the connectors are registered.
10644 intel_fbdev_initial_config_async(&i915->drm);
10647 * We need to coordinate the hotplugs with the asynchronous
10648 * fbdev configuration, for which we use the
10649 * fbdev->async_cookie.
10651 drm_kms_helper_poll_init(&i915->drm);
/*
 * intel_display_driver_unregister() - reverse of the register step:
 * tear down fbdev and audio, stop polling, shut down the display via
 * the atomic helper, then unregister ACPI video and opregion.
 */
10654 void intel_display_driver_unregister(struct drm_i915_private *i915)
10656 if (!HAS_DISPLAY(i915))
10659 intel_fbdev_unregister(i915);
10660 intel_audio_deinit(i915);
10663 * After flushing the fbdev (incl. a late async config which
10664 * will have delayed queuing of a hotplug event), then flush
10665 * the hotplug events.
10667 drm_kms_helper_poll_fini(&i915->drm);
10668 drm_atomic_helper_shutdown(&i915->drm);
10670 acpi_video_unregister();
10671 intel_opregion_unregister(i915);
10674 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
10676 return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);