/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 #include <linux/vga_switcheroo.h>
37 #include <drm/drm_atomic.h>
38 #include <drm/drm_atomic_helper.h>
39 #include <drm/drm_atomic_uapi.h>
40 #include <drm/drm_damage_helper.h>
41 #include <drm/drm_dp_helper.h>
42 #include <drm/drm_edid.h>
43 #include <drm/drm_fourcc.h>
44 #include <drm/drm_plane_helper.h>
45 #include <drm/drm_privacy_screen_consumer.h>
46 #include <drm/drm_probe_helper.h>
47 #include <drm/drm_rect.h>
49 #include "display/intel_audio.h"
50 #include "display/intel_crt.h"
51 #include "display/intel_ddi.h"
52 #include "display/intel_display_debugfs.h"
53 #include "display/intel_dp.h"
54 #include "display/intel_dp_mst.h"
55 #include "display/intel_dpll.h"
56 #include "display/intel_dpll_mgr.h"
57 #include "display/intel_drrs.h"
58 #include "display/intel_dsi.h"
59 #include "display/intel_dvo.h"
60 #include "display/intel_fb.h"
61 #include "display/intel_gmbus.h"
62 #include "display/intel_hdmi.h"
63 #include "display/intel_lvds.h"
64 #include "display/intel_sdvo.h"
65 #include "display/intel_snps_phy.h"
66 #include "display/intel_tv.h"
67 #include "display/intel_vdsc.h"
68 #include "display/intel_vrr.h"
70 #include "gem/i915_gem_lmem.h"
71 #include "gem/i915_gem_object.h"
73 #include "gt/gen8_ppgtt.h"
79 #include "intel_acpi.h"
80 #include "intel_atomic.h"
81 #include "intel_atomic_plane.h"
83 #include "intel_cdclk.h"
84 #include "intel_color.h"
85 #include "intel_crtc.h"
87 #include "intel_display_types.h"
88 #include "intel_dmc.h"
89 #include "intel_dp_link_training.h"
90 #include "intel_dpt.h"
91 #include "intel_fbc.h"
92 #include "intel_fbdev.h"
93 #include "intel_fdi.h"
94 #include "intel_fifo_underrun.h"
95 #include "intel_frontbuffer.h"
96 #include "intel_hdcp.h"
97 #include "intel_hotplug.h"
98 #include "intel_overlay.h"
99 #include "intel_panel.h"
100 #include "intel_pch_display.h"
101 #include "intel_pch_refclk.h"
102 #include "intel_pcode.h"
103 #include "intel_pipe_crc.h"
104 #include "intel_plane_initial.h"
105 #include "intel_pm.h"
106 #include "intel_pps.h"
107 #include "intel_psr.h"
108 #include "intel_quirks.h"
109 #include "intel_sprite.h"
110 #include "intel_tc.h"
111 #include "intel_vga.h"
112 #include "i9xx_plane.h"
113 #include "skl_scaler.h"
114 #include "skl_universal_plane.h"
115 #include "vlv_dsi_pll.h"
116 #include "vlv_sideband.h"
/* Forward declarations for helpers defined later in this file. */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
133 * intel_update_watermarks - update FIFO watermark values based on current modes
134 * @dev_priv: i915 device
136 * Calculate watermark values for the various WM regs based on current mode
137 * and plane configuration.
139 * There are several cases to deal with here:
140 * - normal (i.e. non-self-refresh)
141 * - self-refresh (SR) mode
142 * - lines are large relative to FIFO size (buffer can hold up to 2)
143 * - lines are small relative to FIFO size (buffer can hold more than 2
144 * lines), so need to account for TLB latency
146 * The normal calculation is:
147 * watermark = dotclock * bytes per pixel * latency
148 * where latency is platform & configuration dependent (we assume pessimal
151 * The SR calculation is:
152 * watermark = (trunc(latency/line time)+1) * surface width *
155 * line time = htotal / dotclock
156 * surface width = hdisplay for normal plane and 64 for cursor
157 * and latency is assumed to be high, as above.
159 * The final value programmed to the register should always be rounded up,
160 * and include an extra 2 entries to account for clock crossings.
162 * We don't use the sprite, so we can ignore that. And on Crestline we have
163 * to set the non-SR watermarks to 8.
165 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
167 if (dev_priv->wm_disp->update_wm)
168 dev_priv->wm_disp->update_wm(dev_priv);
171 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
172 struct intel_crtc *crtc)
174 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
175 if (dev_priv->wm_disp->compute_pipe_wm)
176 return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
180 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
181 struct intel_crtc *crtc)
183 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
184 if (!dev_priv->wm_disp->compute_intermediate_wm)
186 if (drm_WARN_ON(&dev_priv->drm,
187 !dev_priv->wm_disp->compute_pipe_wm))
189 return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
192 static bool intel_initial_watermarks(struct intel_atomic_state *state,
193 struct intel_crtc *crtc)
195 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
196 if (dev_priv->wm_disp->initial_watermarks) {
197 dev_priv->wm_disp->initial_watermarks(state, crtc);
203 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
204 struct intel_crtc *crtc)
206 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
207 if (dev_priv->wm_disp->atomic_update_watermarks)
208 dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
211 static void intel_optimize_watermarks(struct intel_atomic_state *state,
212 struct intel_crtc *crtc)
214 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
215 if (dev_priv->wm_disp->optimize_watermarks)
216 dev_priv->wm_disp->optimize_watermarks(state, crtc);
219 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
221 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
222 if (dev_priv->wm_disp->compute_global_watermarks)
223 return dev_priv->wm_disp->compute_global_watermarks(state);
227 /* returns HPLL frequency in kHz */
228 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
230 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
232 /* Obtain SKU information */
233 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
234 CCK_FUSE_HPLL_FREQ_MASK;
236 return vco_freq[hpll_freq] * 1000;
239 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
240 const char *name, u32 reg, int ref_freq)
245 val = vlv_cck_read(dev_priv, reg);
246 divider = val & CCK_FREQUENCY_VALUES;
248 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
249 (divider << CCK_FREQUENCY_STATUS_SHIFT),
250 "%s change in progress\n", name);
252 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
255 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
256 const char *name, u32 reg)
260 vlv_cck_get(dev_priv);
262 if (dev_priv->hpll_freq == 0)
263 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
265 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
267 vlv_cck_put(dev_priv);
272 static void intel_update_czclk(struct drm_i915_private *dev_priv)
274 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
277 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
278 CCK_CZ_CLOCK_CONTROL);
280 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
281 dev_priv->czclk_freq);
284 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
286 return (crtc_state->active_planes &
287 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
290 /* WA Display #0827: Gen9:all */
292 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
295 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
296 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
298 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
299 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
302 /* Wa_2006604312:icl,ehl */
304 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
308 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
309 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
311 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
312 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
315 /* Wa_1604331009:icl,jsl,ehl */
317 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
320 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
321 enable ? CURSOR_GATING_DIS : 0);
325 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
327 return crtc_state->master_transcoder != INVALID_TRANSCODER;
331 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
333 return crtc_state->sync_mode_slaves_mask != 0;
337 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
339 return is_trans_port_sync_master(crtc_state) ||
340 is_trans_port_sync_slave(crtc_state);
343 static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
345 if (crtc_state->bigjoiner_slave)
346 return crtc_state->bigjoiner_linked_crtc;
348 return to_intel_crtc(crtc_state->uapi.crtc);
351 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
354 i915_reg_t reg = PIPEDSL(pipe);
358 if (DISPLAY_VER(dev_priv) == 2)
359 line_mask = DSL_LINEMASK_GEN2;
361 line_mask = DSL_LINEMASK_GEN3;
363 line1 = intel_de_read(dev_priv, reg) & line_mask;
365 line2 = intel_de_read(dev_priv, reg) & line_mask;
367 return line1 != line2;
370 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
372 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
373 enum pipe pipe = crtc->pipe;
375 /* Wait for the display line to settle/start moving */
376 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
377 drm_err(&dev_priv->drm,
378 "pipe %c scanline %s wait timed out\n",
379 pipe_name(pipe), onoff(state));
382 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
384 wait_for_pipe_scanline_moving(crtc, false);
387 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
389 wait_for_pipe_scanline_moving(crtc, true);
393 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
395 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
396 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
398 if (DISPLAY_VER(dev_priv) >= 4) {
399 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
400 i915_reg_t reg = PIPECONF(cpu_transcoder);
402 /* Wait for the Pipe State to go off */
403 if (intel_de_wait_for_clear(dev_priv, reg,
404 I965_PIPECONF_ACTIVE, 100))
405 drm_WARN(&dev_priv->drm, 1,
406 "pipe_off wait timed out\n");
408 intel_wait_for_pipe_scanline_stopped(crtc);
412 void assert_transcoder(struct drm_i915_private *dev_priv,
413 enum transcoder cpu_transcoder, bool state)
416 enum intel_display_power_domain power_domain;
417 intel_wakeref_t wakeref;
419 /* we keep both pipes enabled on 830 */
420 if (IS_I830(dev_priv))
423 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
424 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
426 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
427 cur_state = !!(val & PIPECONF_ENABLE);
429 intel_display_power_put(dev_priv, power_domain, wakeref);
434 I915_STATE_WARN(cur_state != state,
435 "transcoder %s assertion failure (expected %s, current %s)\n",
436 transcoder_name(cpu_transcoder),
437 onoff(state), onoff(cur_state));
440 static void assert_plane(struct intel_plane *plane, bool state)
445 cur_state = plane->get_hw_state(plane, &pipe);
447 I915_STATE_WARN(cur_state != state,
448 "%s assertion failure (expected %s, current %s)\n",
449 plane->base.name, onoff(state), onoff(cur_state));
452 #define assert_plane_enabled(p) assert_plane(p, true)
453 #define assert_plane_disabled(p) assert_plane(p, false)
455 static void assert_planes_disabled(struct intel_crtc *crtc)
457 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
458 struct intel_plane *plane;
460 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
461 assert_plane_disabled(plane);
464 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
465 struct intel_digital_port *dig_port,
466 unsigned int expected_mask)
471 switch (dig_port->base.port) {
473 port_mask = DPLL_PORTB_READY_MASK;
477 port_mask = DPLL_PORTC_READY_MASK;
482 port_mask = DPLL_PORTD_READY_MASK;
483 dpll_reg = DPIO_PHY_STATUS;
489 if (intel_de_wait_for_register(dev_priv, dpll_reg,
490 port_mask, expected_mask, 1000))
491 drm_WARN(&dev_priv->drm, 1,
492 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
493 dig_port->base.base.base.id, dig_port->base.base.name,
494 intel_de_read(dev_priv, dpll_reg) & port_mask,
498 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
500 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
502 if (HAS_PCH_LPT(dev_priv))
508 void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
510 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
511 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
512 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
513 enum pipe pipe = crtc->pipe;
517 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
519 assert_planes_disabled(crtc);
522 * A pipe without a PLL won't actually be able to drive bits from
523 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
526 if (HAS_GMCH(dev_priv)) {
527 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
528 assert_dsi_pll_enabled(dev_priv);
530 assert_pll_enabled(dev_priv, pipe);
532 if (new_crtc_state->has_pch_encoder) {
533 /* if driving the PCH, we need FDI enabled */
534 assert_fdi_rx_pll_enabled(dev_priv,
535 intel_crtc_pch_transcoder(crtc));
536 assert_fdi_tx_pll_enabled(dev_priv,
537 (enum pipe) cpu_transcoder);
539 /* FIXME: assert CPU port conditions for SNB+ */
542 /* Wa_22012358565:adl-p */
543 if (DISPLAY_VER(dev_priv) == 13)
544 intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
545 0, PIPE_ARB_USE_PROG_SLOTS);
547 reg = PIPECONF(cpu_transcoder);
548 val = intel_de_read(dev_priv, reg);
549 if (val & PIPECONF_ENABLE) {
550 /* we keep both pipes enabled on 830 */
551 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
555 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
556 intel_de_posting_read(dev_priv, reg);
559 * Until the pipe starts PIPEDSL reads will return a stale value,
560 * which causes an apparent vblank timestamp jump when PIPEDSL
561 * resets to its proper value. That also messes up the frame count
562 * when it's derived from the timestamps. So let's wait for the
563 * pipe to start properly before we call drm_crtc_vblank_on()
565 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
566 intel_wait_for_pipe_scanline_moving(crtc);
569 void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
571 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
572 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
573 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
574 enum pipe pipe = crtc->pipe;
578 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
581 * Make sure planes won't keep trying to pump pixels to us,
582 * or we might hang the display.
584 assert_planes_disabled(crtc);
586 reg = PIPECONF(cpu_transcoder);
587 val = intel_de_read(dev_priv, reg);
588 if ((val & PIPECONF_ENABLE) == 0)
592 * Double wide has implications for planes
593 * so best keep it disabled when not needed.
595 if (old_crtc_state->double_wide)
596 val &= ~PIPECONF_DOUBLE_WIDE;
598 /* Don't disable pipe or pipe PLLs if needed */
599 if (!IS_I830(dev_priv))
600 val &= ~PIPECONF_ENABLE;
602 if (DISPLAY_VER(dev_priv) >= 12)
603 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
604 FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
606 intel_de_write(dev_priv, reg, val);
607 if ((val & PIPECONF_ENABLE) == 0)
608 intel_wait_for_pipe_off(old_crtc_state);
611 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
613 unsigned int size = 0;
616 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
617 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
622 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
624 unsigned int size = 0;
627 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
628 unsigned int plane_size;
630 if (rem_info->plane[i].linear)
631 plane_size = rem_info->plane[i].size;
633 plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
638 if (rem_info->plane_alignment)
639 size = ALIGN(size, rem_info->plane_alignment);
647 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
649 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
650 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
652 return DISPLAY_VER(dev_priv) < 4 ||
654 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
658 * Convert the x/y offsets into a linear offset.
659 * Only valid with 0/180 degree rotation, which is fine since linear
660 * offset is only used with linear buffers on pre-hsw and tiled buffers
661 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
663 u32 intel_fb_xy_to_linear(int x, int y,
664 const struct intel_plane_state *state,
667 const struct drm_framebuffer *fb = state->hw.fb;
668 unsigned int cpp = fb->format->cpp[color_plane];
669 unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
671 return y * pitch + x * cpp;
675 * Add the x/y offsets derived from fb->offsets[] to the user
676 * specified plane src x/y offsets. The resulting x/y offsets
677 * specify the start of scanout from the beginning of the gtt mapping.
679 void intel_add_fb_offsets(int *x, int *y,
680 const struct intel_plane_state *state,
684 *x += state->view.color_plane[color_plane].x;
685 *y += state->view.color_plane[color_plane].y;
688 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
689 u32 pixel_format, u64 modifier)
691 struct intel_crtc *crtc;
692 struct intel_plane *plane;
694 if (!HAS_DISPLAY(dev_priv))
698 * We assume the primary plane for pipe A has
699 * the highest stride limits of them all,
700 * if in case pipe A is disabled, use the first pipe from pipe_mask.
702 crtc = intel_first_crtc(dev_priv);
706 plane = to_intel_plane(crtc->base.primary);
708 return plane->max_stride(plane, pixel_format, modifier,
713 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
714 struct intel_plane_state *plane_state,
717 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
719 plane_state->uapi.visible = visible;
722 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
724 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
727 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
729 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
730 struct drm_plane *plane;
733 * Active_planes aliases if multiple "primary" or cursor planes
734 * have been used on the same (or wrong) pipe. plane_mask uses
735 * unique ids, hence we can use that to reconstruct active_planes.
737 crtc_state->enabled_planes = 0;
738 crtc_state->active_planes = 0;
740 drm_for_each_plane_mask(plane, &dev_priv->drm,
741 crtc_state->uapi.plane_mask) {
742 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
743 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
747 void intel_plane_disable_noatomic(struct intel_crtc *crtc,
748 struct intel_plane *plane)
750 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
751 struct intel_crtc_state *crtc_state =
752 to_intel_crtc_state(crtc->base.state);
753 struct intel_plane_state *plane_state =
754 to_intel_plane_state(plane->base.state);
756 drm_dbg_kms(&dev_priv->drm,
757 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
758 plane->base.base.id, plane->base.name,
759 crtc->base.base.id, crtc->base.name);
761 intel_set_plane_visible(crtc_state, plane_state, false);
762 fixup_plane_bitmasks(crtc_state);
763 crtc_state->data_rate[plane->id] = 0;
764 crtc_state->min_cdclk[plane->id] = 0;
766 if (plane->id == PLANE_PRIMARY)
767 hsw_disable_ips(crtc_state);
770 * Vblank time updates from the shadow to live plane control register
771 * are blocked if the memory self-refresh mode is active at that
772 * moment. So to make sure the plane gets truly disabled, disable
773 * first the self-refresh mode. The self-refresh enable bit in turn
774 * will be checked/applied by the HW only at the next frame start
775 * event which is after the vblank start event, so we need to have a
776 * wait-for-vblank between disabling the plane and the pipe.
778 if (HAS_GMCH(dev_priv) &&
779 intel_set_memory_cxsr(dev_priv, false))
780 intel_crtc_wait_for_next_vblank(crtc);
783 * Gen2 reports pipe underruns whenever all planes are disabled.
784 * So disable underrun reporting before all the planes get disabled.
786 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
787 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
789 intel_plane_disable_arm(plane, crtc_state);
790 intel_crtc_wait_for_next_vblank(crtc);
794 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
798 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
799 plane_state->view.color_plane[0].offset, 0);
805 __intel_display_resume(struct drm_device *dev,
806 struct drm_atomic_state *state,
807 struct drm_modeset_acquire_ctx *ctx)
809 struct drm_crtc_state *crtc_state;
810 struct drm_crtc *crtc;
813 intel_modeset_setup_hw_state(dev, ctx);
814 intel_vga_redisable(to_i915(dev));
820 * We've duplicated the state, pointers to the old state are invalid.
822 * Don't attempt to use the old state until we commit the duplicated state.
824 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
826 * Force recalculation even if we restore
827 * current state. With fast modeset this may not result
828 * in a modeset when the state is compatible.
830 crtc_state->mode_changed = true;
833 /* ignore any reset values/BIOS leftovers in the WM registers */
834 if (!HAS_GMCH(to_i915(dev)))
835 to_intel_atomic_state(state)->skip_intermediate_wm = true;
837 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
839 drm_WARN_ON(dev, ret == -EDEADLK);
843 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
845 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
846 intel_has_gpu_reset(to_gt(dev_priv)));
849 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
851 struct drm_device *dev = &dev_priv->drm;
852 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
853 struct drm_atomic_state *state;
856 if (!HAS_DISPLAY(dev_priv))
859 /* reset doesn't touch the display */
860 if (!dev_priv->params.force_reset_modeset_test &&
861 !gpu_reset_clobbers_display(dev_priv))
864 /* We have a modeset vs reset deadlock, defensively unbreak it. */
865 set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
866 smp_mb__after_atomic();
867 wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
869 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
870 drm_dbg_kms(&dev_priv->drm,
871 "Modeset potentially stuck, unbreaking through wedging\n");
872 intel_gt_set_wedged(to_gt(dev_priv));
876 * Need mode_config.mutex so that we don't
877 * trample ongoing ->detect() and whatnot.
879 mutex_lock(&dev->mode_config.mutex);
880 drm_modeset_acquire_init(ctx, 0);
882 ret = drm_modeset_lock_all_ctx(dev, ctx);
886 drm_modeset_backoff(ctx);
889 * Disabling the crtcs gracefully seems nicer. Also the
890 * g33 docs say we should at least disable all the planes.
892 state = drm_atomic_helper_duplicate_state(dev, ctx);
894 ret = PTR_ERR(state);
895 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
900 ret = drm_atomic_helper_disable_all(dev, ctx);
902 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
904 drm_atomic_state_put(state);
908 dev_priv->modeset_restore_state = state;
909 state->acquire_ctx = ctx;
912 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
914 struct drm_device *dev = &dev_priv->drm;
915 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
916 struct drm_atomic_state *state;
919 if (!HAS_DISPLAY(dev_priv))
922 /* reset doesn't touch the display */
923 if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
926 state = fetch_and_zero(&dev_priv->modeset_restore_state);
930 /* reset doesn't touch the display */
931 if (!gpu_reset_clobbers_display(dev_priv)) {
932 /* for testing only restore the display */
933 ret = __intel_display_resume(dev, state, ctx);
935 drm_err(&dev_priv->drm,
936 "Restoring old state failed with %i\n", ret);
939 * The display has been reset as well,
940 * so need a full re-initialization.
942 intel_pps_unlock_regs_wa(dev_priv);
943 intel_modeset_init_hw(dev_priv);
944 intel_init_clock_gating(dev_priv);
945 intel_hpd_init(dev_priv);
947 ret = __intel_display_resume(dev, state, ctx);
949 drm_err(&dev_priv->drm,
950 "Restoring old state failed with %i\n", ret);
952 intel_hpd_poll_disable(dev_priv);
955 drm_atomic_state_put(state);
957 drm_modeset_drop_locks(ctx);
958 drm_modeset_acquire_fini(ctx);
959 mutex_unlock(&dev->mode_config.mutex);
961 clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
964 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
966 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
967 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
968 enum pipe pipe = crtc->pipe;
971 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
974 * Display WA #1153: icl
975 * enable hardware to bypass the alpha math
976 * and rounding for per-pixel values 00 and 0xff
978 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
980 * Display WA # 1605353570: icl
981 * Set the pixel rounding bit to 1 for allowing
982 * passthrough of Frame buffer pixels unmodified
985 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
988 * Underrun recovery must always be disabled on display 13+.
989 * DG2 chicken bit meaning is inverted compared to other platforms.
991 if (IS_DG2(dev_priv))
992 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
993 else if (DISPLAY_VER(dev_priv) >= 13)
994 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
996 /* Wa_14010547955:dg2 */
997 if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
998 tmp |= DG2_RENDER_CCSTAG_4_3_EN;
1000 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1003 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1005 struct drm_crtc *crtc;
1008 drm_for_each_crtc(crtc, &dev_priv->drm) {
1009 struct drm_crtc_commit *commit;
1010 spin_lock(&crtc->commit_lock);
1011 commit = list_first_entry_or_null(&crtc->commit_list,
1012 struct drm_crtc_commit, commit_entry);
1013 cleanup_done = commit ?
1014 try_wait_for_completion(&commit->cleanup_done) : true;
1015 spin_unlock(&crtc->commit_lock);
1020 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
1029 * Finds the encoder associated with the given CRTC. This can only be
1030 * used when we know that the CRTC isn't feeding multiple encoders!
1032 struct intel_encoder *
1033 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1034 const struct intel_crtc_state *crtc_state)
1036 const struct drm_connector_state *connector_state;
1037 const struct drm_connector *connector;
1038 struct intel_encoder *encoder = NULL;
1039 struct intel_crtc *master_crtc;
1040 int num_encoders = 0;
1043 master_crtc = intel_master_crtc(crtc_state);
1045 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1046 if (connector_state->crtc != &master_crtc->base)
1049 encoder = to_intel_encoder(connector_state->best_encoder);
1053 drm_WARN(encoder->base.dev, num_encoders != 1,
1054 "%d encoders for pipe %c\n",
1055 num_encoders, pipe_name(master_crtc->pipe));
1060 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
1063 i915_reg_t dslreg = PIPEDSL(pipe);
1066 temp = intel_de_read(dev_priv, dslreg);
1068 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
1069 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
1070 drm_err(&dev_priv->drm,
1071 "mode set failed: pipe %c stuck\n",
1076 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
1078 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1079 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1080 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
1081 enum pipe pipe = crtc->pipe;
1082 int width = drm_rect_width(dst);
1083 int height = drm_rect_height(dst);
1087 if (!crtc_state->pch_pfit.enabled)
1090 /* Force use of hard-coded filter coefficients
1091 * as some pre-programmed values are broken,
1094 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
1095 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1096 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
1098 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1100 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
1101 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
/*
 * Enable IPS (Intermediate Pixel Storage) on HSW/BDW.
 *
 * Must run after at least one non-cursor plane is enabled and a vblank has
 * passed (called from the post-plane-update phase). No-op when the state
 * does not have IPS enabled.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	/* On BDW IPS is toggled through the pcode mailbox, not IPS_CTL. */
	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/*
		 * Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	/* HSW path: direct MMIO enable. */
	intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
	/*
	 * The bit only becomes 1 in the next vblank, so this wait here
	 * is essentially intel_wait_for_vblank. If we don't have this
	 * and don't wait for vblanks until the end of crtc_enable, then
	 * the HW state readout code will complain that the expected
	 * IPS_CTL value is not the one we read.
	 */
	if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
		drm_err(&dev_priv->drm,
			"Timed out waiting for IPS enable\n");
/*
 * Disable IPS on HSW/BDW and wait for the hardware to confirm, then wait
 * one vblank so planes may safely be disabled afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
	/* BDW: disable via the pcode mailbox (opening of the call is outside this view). */
	if (IS_BROADWELL(dev_priv)) {
		sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	/* HSW path: direct MMIO disable, posting read to flush the write. */
	intel_de_write(dev_priv, IPS_CTL, 0);
	intel_de_posting_read(dev_priv, IPS_CTL);

	/* We need to wait for a vblank before we can disable the plane. */
	intel_crtc_wait_for_next_vblank(crtc);
/*
 * Turn off the legacy video overlay attached to @crtc (if any) before the
 * crtc's planes are disabled.
 */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
	/* Return value deliberately ignored: best-effort switch-off. */
	(void) intel_overlay_switch_off(crtc->overlay);

	/*
	 * Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
/*
 * Decide whether IPS must be turned off before the plane update.
 *
 * Returns true when the new state no longer has IPS enabled (and on HSW
 * also when the gamma LUT is about to be reprogrammed in split-gamma mode,
 * per the workaround below). Early-out paths' return statements are
 * outside this view.
 */
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to disable if IPS wasn't on. */
	if (!old_crtc_state->ips_enabled)
	/* Full modesets handle IPS on their own path. */
	if (intel_crtc_needs_modeset(new_crtc_state))
	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Disable IPS before we program the LUT.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)

	return !new_crtc_state->ips_enabled;
/*
 * Decide whether IPS must be (re-)enabled after the plane update.
 *
 * Mirror of hsw_pre_update_disable_ips(): true when IPS was off in the old
 * state, when the HSW split-gamma LUT workaround forced it off, or when we
 * can't trust BDW readout on the first fastset. Early-out returns are
 * outside this view.
 */
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!new_crtc_state->ips_enabled)
	if (intel_crtc_needs_modeset(new_crtc_state))
	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Re-enable IPS after the LUT has been programmed.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
	/*
	 * We can't read out IPS on broadwell, assume the worst and
	 * forcibly enable IPS on the first fastset.
	 */
	if (new_crtc_state->update_pipe && old_crtc_state->inherited)

	return !old_crtc_state->ips_enabled;
1241 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1243 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1245 if (!crtc_state->nv12_planes)
1248 /* WA Display #0827: Gen9:all */
1249 if (DISPLAY_VER(dev_priv) == 9)
1255 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1257 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1259 /* Wa_2006604312:icl,ehl */
1260 if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
1266 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1268 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1270 /* Wa_1604331009:icl,jsl,ehl */
1271 if (is_hdr_mode(crtc_state) &&
1272 crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1273 DISPLAY_VER(dev_priv) == 11)
/*
 * Apply/remove the plane stretch-max workaround needed for async flips
 * when VT-d is active. Gen9 programs the SKL field; the HSW/BDW branch
 * opener ("} else if (...)") is outside this view.
 */
static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N strech max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
1298 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1300 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1302 return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
1303 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1306 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1307 const struct intel_crtc_state *new_crtc_state)
1309 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1310 new_crtc_state->active_planes;
1313 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1314 const struct intel_crtc_state *new_crtc_state)
1316 return old_crtc_state->active_planes &&
1317 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
/*
 * Per-crtc work that must run after the plane update has been committed:
 * frontbuffer flip notification, post-update watermarks, IPS re-enable,
 * FBC/DRRS bookkeeping, and tearing down workarounds that are no longer
 * needed in the new state.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	/* Optimal watermarks after the update, only on an active pipe. */
	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);
	intel_drrs_page_flip(state, crtc);

	/* Drop each workaround only on the old->new "no longer needed" edge. */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);
/*
 * Enable flip-done interrupt handling for every plane of @crtc that is
 * part of this update and supports it. Loop index declaration is outside
 * this view.
 */
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		/* Only planes on this pipe that are actually being updated. */
		if (plane->enable_flip_done &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
/*
 * Counterpart of intel_crtc_enable_flip_done(): disable flip-done interrupt
 * handling for every updated plane of @crtc that supports it.
 */
static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->disable_flip_done &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
/*
 * Workaround for planes whose async-flip enable bit is double buffered
 * and only latched at vblank start: rewrite the old plane state with the
 * async-flip bit cleared, then wait a vblank so the clear latches before
 * the real update. The trailing "if (need_vbl_wait)" guard is outside
 * this view.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;

	intel_crtc_wait_for_next_vblank(crtc);
/*
 * Per-crtc work that must run before the plane update is committed:
 * PSR/IPS/FBC preparation, arming of workarounds the new state requires,
 * CxSR/LP-watermark disables that need a vblank to latch, intermediate
 * watermark programming, and gen2 underrun-reporting suppression.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC may ask for a vblank wait before the planes change. */
	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	/* Arm each workaround only on the old->new "newly needed" edge. */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
/*
 * Disable (arm the disable of) every plane of @crtc included in this
 * update, switch off the legacy overlay first, and flip the frontbuffer
 * bits of the planes that were visible. The loop's "continue" for
 * non-matching planes is outside this view.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))

		intel_plane_disable_arm(plane, new_crtc_state);

		/* Only previously-visible planes need a frontbuffer flip. */
		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;

	intel_frontbuffer_flip(dev_priv, fb_bits);
/**
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
	struct intel_encoder *encoder;

	/* DP-MST: the primary encoder lives on the backing digital port. */
	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	encoder = intel_attached_encoder(connector);
	/* Every non-MST connector is expected to have an attached encoder. */
	drm_WARN_ON(connector->base.dev, !encoder);
/*
 * Run the encoder ->update_prepare() hooks before a modeset commit, and
 * carry the old DPLL state over into fastset crtc states so TypeC port
 * DPLL bookkeeping stays consistent after non-blocking commits.
 * The "continue" lines of both loops are outside this view.
 */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			/* Modeset crtcs get fresh DPLL state; only fastsets inherit. */
			if (intel_crtc_needs_modeset(new_crtc_state))

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;

	/* Encoder hooks only matter for full modesets. */
	if (!state->modeset)

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
/*
 * Run the encoder ->update_complete() hooks after a modeset commit.
 * Mirror of intel_encoders_update_prepare(); "continue" lines are
 * outside this view.
 */
static void intel_encoders_update_complete(struct intel_atomic_state *state)
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;

	if (!state->modeset)

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_complete)

		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_complete(state, encoder, crtc);
/*
 * Invoke the optional ->pre_pll_enable() hook of every encoder driven by
 * @crtc, before the crtc's PLL is enabled.
 */
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		/* Skip connectors not attached to this crtc. */
		if (conn_state->crtc != &crtc->base)

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
/*
 * Invoke the optional ->pre_enable() hook of every encoder driven by
 * @crtc, after the PLL is up but before the pipe is enabled.
 */
static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
/*
 * Invoke the optional ->enable() hook of every encoder driven by @crtc and
 * notify the ACPI opregion that each encoder is now active.
 */
static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		/* Opregion notification happens even without an ->enable hook. */
		intel_opregion_notify_encoder(encoder, true);
/*
 * Notify opregion and invoke the optional ->disable() hook of every
 * encoder that was driven by @crtc in the old state.
 */
static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)

		/* Opregion is told first, before the encoder itself goes down. */
		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
/*
 * Invoke the optional ->post_disable() hook of every encoder that was
 * driven by @crtc, after the pipe has been shut down.
 */
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
/*
 * Invoke the optional ->post_pll_disable() hook of every encoder that was
 * driven by @crtc, after the crtc's PLL has been disabled.
 */
static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
/*
 * Invoke the optional ->update_pipe() hook of every encoder driven by
 * @crtc — used on the fastset path instead of a full enable sequence.
 */
static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
1812 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1814 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1815 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1817 plane->disable_arm(plane, crtc_state);
/*
 * Full modeset enable sequence for ILK-style (PCH) crtcs. The ordering of
 * the steps below follows hardware requirements and must not be changed
 * casually (FDI PLL before pipe, LUT before pipe running, etc.).
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/*
		 * Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling.
		 */
		ilk_fdi_pll_enable(new_crtc_state);
		/* No PCH encoder: FDI must be fully off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled.
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);

	/* Re-enable the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1908 /* IPS only exists on ULT machines and is tied to pipe A. */
1909 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
1911 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
/*
 * Display WA #1180 (GLK): toggle the DPF/DPFR clock-gating-disable bits of
 * CLKGATE_DIS_PSL for @pipe. The set/clear of @mask on @val happens in
 * lines outside this view before the write-back.
 */
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
					    enum pipe pipe, bool apply)
	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;

	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
/*
 * Program the MBUS DBOX credits for @crtc's pipe. Credit counts depend on
 * platform/display version; the declaration/initialization of val and the
 * else-branch openers are outside this view.
 */
static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wa_22010947358:adl-p */
	if (IS_ALDERLAKE_P(dev_priv))
		/* ADL-P A-credits differ when the MBUS is joined. */
		val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
		val = MBUS_DBOX_A_CREDIT(2);

	if (DISPLAY_VER(dev_priv) >= 12) {
		val |= MBUS_DBOX_BW_CREDIT(2);
		val |= MBUS_DBOX_B_CREDIT(12);
		val |= MBUS_DBOX_BW_CREDIT(1);
		val |= MBUS_DBOX_B_CREDIT(8);

	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
1951 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1953 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1954 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1956 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1957 HSW_LINETIME(crtc_state->linetime) |
1958 HSW_IPS_LINETIME(crtc_state->ips_linetime));
1961 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1963 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1964 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1965 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1968 val = intel_de_read(dev_priv, reg);
1969 val &= ~HSW_FRAME_START_DELAY_MASK;
1970 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
1971 intel_de_write(dev_priv, reg, val);
/*
 * Pre-enable steps for a bigjoiner-configured crtc: run the master's
 * encoder pre-pll/pre-enable hooks from the slave's enable path, bring up
 * the shared DPLL, and enable VDSC / the uncompressed joiner. The loop's
 * break and some conditionals are outside this view.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state;
	struct intel_crtc *master_crtc;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;

	master_crtc = intel_master_crtc(crtc_state);
	master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);

	/* Find the encoder attached to the bigjoiner master crtc. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master_crtc->base)

		encoder = to_intel_encoder(conn_state->best_encoder);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (crtc_state->bigjoiner_slave)
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (crtc_state->bigjoiner_slave)
		intel_encoders_pre_enable(state, master_crtc);

	/* need to enable VDSC, which we skipped in pre-enable */
	intel_dsc_enable(crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(crtc_state);
/*
 * Full modeset enable sequence for HSW+ (DDI) crtcs, including gen9+ and
 * bigjoiner configurations. As with ilk_crtc_enable(), step ordering is
 * dictated by hardware and workarounds.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
	/* Non-bigjoiner: normal encoder + DPLL bring-up. */
	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
		/* Bigjoiner path handles master/slave hooks itself. */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder programming is skipped on slaves and DSI transcoders. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);

		hsw_set_transconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	/* gen9+ uses the SKL scaler-based pfit, older uses the ILK pfit. */
	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled.
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11) {
		const struct intel_dbuf_state *dbuf_state =
			intel_atomic_get_new_dbuf_state(state);

		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);

	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		/* The WA requires a vblank before clock gating is restored. */
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);

	/*
	 * If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround.
	 */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		/* Double vblank wait on the workaround pipe. */
		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
2119 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2121 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2122 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2123 enum pipe pipe = crtc->pipe;
2125 /* To avoid upsetting the power well on haswell only disable the pfit if
2126 * it's in use. The hw state code will make sure we get this right. */
2127 if (!old_crtc_state->pch_pfit.enabled)
2130 intel_de_write(dev_priv, PF_CTL(pipe), 0);
2131 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
2132 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
/*
 * Full disable sequence for ILK-style (PCH) crtcs: encoders, vblank,
 * transcoder, pfit, then the PCH side, with FIFO underrun reporting
 * suppressed around the whole sequence.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	/* Restore underrun reporting once everything is down. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/*
 * Disable sequence for HSW+ (DDI) crtcs. Encoder hooks are only run for
 * the bigjoiner master (or a non-bigjoiner crtc) — the slave's encoders
 * are handled via the master.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	if (!old_crtc_state->bigjoiner_slave) {
		intel_encoders_disable(state, crtc);
		intel_encoders_post_disable(state, crtc);
/*
 * Program and enable the GMCH panel fitter from the precomputed state.
 * Must run while the pipe/transcoder is still disabled (asserted below).
 */
2187 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2189 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2190 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Nothing to do when the state computed no pfit usage. */
2192 if (!crtc_state->gmch_pfit.control)
2196 * The panel fitter should only be adjusted whilst the pipe is disabled,
2197 * according to register description and PRM.
2199 drm_WARN_ON(&dev_priv->drm,
2200 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
2201 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2203 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
2204 crtc_state->gmch_pfit.pgm_ratios);
2205 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
2207 /* Border color in case we don't scale up to the full screen. Black by
2208 * default, change to something else for debugging. */
2209 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
/*
 * Report whether @phy is a "combo" PHY on this platform. Each branch
 * encodes the per-platform cutoff for which PHY instances are combo.
 */
2212 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2214 if (phy == PHY_NONE)
2216 else if (IS_DG2(dev_priv))
2218 * DG2 outputs labelled as "combo PHY" in the bspec use
2219 * SNPS PHYs with completely different programming,
2220 * hence we always return false here.
2223 else if (IS_ALDERLAKE_S(dev_priv))
2224 return phy <= PHY_E;
2225 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2226 return phy <= PHY_D;
2227 else if (IS_JSL_EHL(dev_priv))
2228 return phy <= PHY_C;
2229 else if (DISPLAY_VER(dev_priv) >= 11)
2230 return phy <= PHY_B;
/*
 * Report whether @phy is a Type-C PHY on this platform; each branch
 * gives the platform-specific PHY range that is Type-C capable.
 */
2235 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2237 if (IS_DG2(dev_priv))
2238 /* DG2's "TC1" output uses a SNPS PHY */
2240 else if (IS_ALDERLAKE_P(dev_priv))
2241 return phy >= PHY_F && phy <= PHY_I;
2242 else if (IS_TIGERLAKE(dev_priv))
2243 return phy >= PHY_D && phy <= PHY_I;
2244 else if (IS_ICELAKE(dev_priv))
2245 return phy >= PHY_C && phy <= PHY_F;
/*
 * Report whether @phy is a Synopsys (SNPS) PHY; only DG2 has them.
 */
2250 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2252 if (phy == PHY_NONE)
2254 else if (IS_DG2(dev_priv))
2256 * All four "combo" ports and the TC1 port (PHY E) use
2259 return phy <= PHY_E;
/*
 * Map a DDI port to its PHY instance. Platforms with a remapped port
 * numbering (XELPD D+, TC ports, JSL/EHL port D) are special-cased;
 * everything else maps 1:1 from PORT_A to PHY_A.
 */
2264 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2266 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2267 return PHY_D + port - PORT_D_XELPD;
2268 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2269 return PHY_F + port - PORT_TC1;
2270 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2271 return PHY_B + port - PORT_TC1;
2272 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2273 return PHY_C + port - PORT_TC1;
2274 else if (IS_JSL_EHL(i915) && port == PORT_D)
/* Default: identity mapping from port to phy. */
2277 return PHY_A + port - PORT_A;
/*
 * Map a port to its Type-C port index, or TC_PORT_NONE when the port's
 * PHY is not Type-C. Gen12+ number TC ports from PORT_TC1, older
 * platforms from PORT_C.
 */
2280 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2282 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2283 return TC_PORT_NONE;
2285 if (DISPLAY_VER(dev_priv) >= 12)
2286 return TC_PORT_1 + port - PORT_TC1;
2288 return TC_PORT_1 + port - PORT_C;
/*
 * Map a DDI port to the power domain covering its lane hardware;
 * unknown ports fall back to POWER_DOMAIN_PORT_OTHER.
 * (Switch case labels per port A..I; presumably one per return below.)
 */
2291 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
2295 return POWER_DOMAIN_PORT_DDI_A_LANES;
2297 return POWER_DOMAIN_PORT_DDI_B_LANES;
2299 return POWER_DOMAIN_PORT_DDI_C_LANES;
2301 return POWER_DOMAIN_PORT_DDI_D_LANES;
2303 return POWER_DOMAIN_PORT_DDI_E_LANES;
2305 return POWER_DOMAIN_PORT_DDI_F_LANES;
2307 return POWER_DOMAIN_PORT_DDI_G_LANES;
2309 return POWER_DOMAIN_PORT_DDI_H_LANES;
2311 return POWER_DOMAIN_PORT_DDI_I_LANES;
/* Fallback for ports with no dedicated lane power domain. */
2314 return POWER_DOMAIN_PORT_OTHER;
/*
 * Return the AUX power domain for @dig_port, selecting the TBT variant
 * of the domain when the port is in Thunderbolt alt mode, otherwise the
 * legacy AUX domain.
 */
2318 enum intel_display_power_domain
2319 intel_aux_power_domain(struct intel_digital_port *dig_port)
2321 if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
2322 switch (dig_port->aux_ch) {
2324 return POWER_DOMAIN_AUX_C_TBT;
2326 return POWER_DOMAIN_AUX_D_TBT;
2328 return POWER_DOMAIN_AUX_E_TBT;
2330 return POWER_DOMAIN_AUX_F_TBT;
2332 return POWER_DOMAIN_AUX_G_TBT;
2334 return POWER_DOMAIN_AUX_H_TBT;
2336 return POWER_DOMAIN_AUX_I_TBT;
/* Unexpected aux_ch: warn and fall back to the first TBT domain. */
2338 MISSING_CASE(dig_port->aux_ch);
2339 return POWER_DOMAIN_AUX_C_TBT;
/* Non-TBT ports use the plain AUX domain mapping. */
2343 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
2347 * Converts aux_ch to power_domain without caring about TBT ports for that use
2348 * intel_aux_power_domain()
2350 enum intel_display_power_domain
2351 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
2355 return POWER_DOMAIN_AUX_A;
2357 return POWER_DOMAIN_AUX_B;
2359 return POWER_DOMAIN_AUX_C;
2361 return POWER_DOMAIN_AUX_D;
2363 return POWER_DOMAIN_AUX_E;
2365 return POWER_DOMAIN_AUX_F;
2367 return POWER_DOMAIN_AUX_G;
2369 return POWER_DOMAIN_AUX_H;
2371 return POWER_DOMAIN_AUX_I;
/* Unknown channel: warn and fall back to AUX A. */
2373 MISSING_CASE(aux_ch);
2374 return POWER_DOMAIN_AUX_A;
/*
 * Build the bitmask of display power domains this CRTC state needs:
 * pipe, transcoder, optional panel fitter, each attached encoder's
 * domain, audio MMIO, shared DPLL core domain and DSC. An inactive
 * CRTC needs none.
 */
2378 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2380 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2381 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2382 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2383 struct drm_encoder *encoder;
2384 enum pipe pipe = crtc->pipe;
2387 if (!crtc_state->hw.active)
2390 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
2391 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
2392 if (crtc_state->pch_pfit.enabled ||
2393 crtc_state->pch_pfit.force_thru)
2394 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
/* Each encoder driven by this CRTC contributes its own domain. */
2396 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2397 crtc_state->uapi.encoder_mask) {
2398 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2400 mask |= BIT_ULL(intel_encoder->power_domain);
2403 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2404 mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
2406 if (crtc_state->shared_dpll)
2407 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
2409 if (crtc_state->dsc.compression_enable)
2410 mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
/*
 * Diff the CRTC's required power domains against those already held and
 * grab references for the newly needed ones (tracked in
 * crtc->enabled_power_domains); the no-longer-needed set is computed
 * here for the caller to release later.
 */
2416 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2418 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2419 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2420 enum intel_display_power_domain domain;
2421 u64 domains, new_domains, old_domains;
2423 domains = get_crtc_power_domains(crtc_state);
/* new = needed but not held; old = held but no longer needed. */
2425 new_domains = domains & ~crtc->enabled_power_domains.mask;
2426 old_domains = crtc->enabled_power_domains.mask & ~domains;
2428 for_each_power_domain(domain, new_domains)
2429 intel_display_power_get_in_set(dev_priv,
2430 &crtc->enabled_power_domains,
/*
 * Release a set of previously acquired CRTC power-domain references
 * from the CRTC's enabled_power_domains tracking set.
 */
2436 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2439 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2440 &crtc->enabled_power_domains,
/*
 * Modeset enable sequence for VLV/CHV CRTCs: timings, pipe source size,
 * PLL bring-up (CHV vs VLV variant), pfit, LUTs, watermarks, transcoder
 * on, then encoder enable hooks.
 */
2444 static void valleyview_crtc_enable(struct intel_atomic_state *state,
2445 struct intel_crtc *crtc)
2447 const struct intel_crtc_state *new_crtc_state =
2448 intel_atomic_get_new_crtc_state(state, crtc);
2449 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2450 enum pipe pipe = crtc->pipe;
/* Enabling an already-active CRTC is a driver bug. */
2452 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2455 if (intel_crtc_has_dp_encoder(new_crtc_state))
2456 intel_dp_set_m_n(new_crtc_state, M1_N1);
2458 intel_set_transcoder_timings(new_crtc_state);
2459 intel_set_pipe_src_size(new_crtc_state);
2461 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2462 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2463 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2466 i9xx_set_pipeconf(new_crtc_state);
2468 crtc->active = true;
2470 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2472 intel_encoders_pre_pll_enable(state, crtc);
/* CHV and VLV have different PLL programming. */
2474 if (IS_CHERRYVIEW(dev_priv))
2475 chv_enable_pll(new_crtc_state);
2477 vlv_enable_pll(new_crtc_state);
2479 intel_encoders_pre_enable(state, crtc);
2481 i9xx_pfit_enable(new_crtc_state);
2483 intel_color_load_luts(new_crtc_state);
2484 intel_color_commit(new_crtc_state);
2485 /* update DSPCNTR to configure gamma for pipe bottom color */
2486 intel_disable_primary_plane(new_crtc_state);
2488 intel_initial_watermarks(state, crtc);
2489 intel_enable_transcoder(new_crtc_state);
2491 intel_crtc_vblank_on(new_crtc_state);
2493 intel_encoders_enable(state, crtc);
/*
 * Modeset enable sequence for legacy i9xx CRTCs; mirrors the VLV path
 * but with the i9xx PLL and gen2-specific underrun/vblank quirks.
 */
2496 static void i9xx_crtc_enable(struct intel_atomic_state *state,
2497 struct intel_crtc *crtc)
2499 const struct intel_crtc_state *new_crtc_state =
2500 intel_atomic_get_new_crtc_state(state, crtc);
2501 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2502 enum pipe pipe = crtc->pipe;
2504 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2507 if (intel_crtc_has_dp_encoder(new_crtc_state))
2508 intel_dp_set_m_n(new_crtc_state, M1_N1);
2510 intel_set_transcoder_timings(new_crtc_state);
2511 intel_set_pipe_src_size(new_crtc_state);
2513 i9xx_set_pipeconf(new_crtc_state);
2515 crtc->active = true;
/* Gen2 has no FIFO underrun reporting. */
2517 if (DISPLAY_VER(dev_priv) != 2)
2518 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2520 intel_encoders_pre_enable(state, crtc);
2522 i9xx_enable_pll(new_crtc_state);
2524 i9xx_pfit_enable(new_crtc_state);
2526 intel_color_load_luts(new_crtc_state);
2527 intel_color_commit(new_crtc_state);
2528 /* update DSPCNTR to configure gamma for pipe bottom color */
2529 intel_disable_primary_plane(new_crtc_state);
/* Fall back to legacy watermark update when no initial_watermarks hook. */
2531 if (!intel_initial_watermarks(state, crtc))
2532 intel_update_watermarks(dev_priv);
2533 intel_enable_transcoder(new_crtc_state);
2535 intel_crtc_vblank_on(new_crtc_state);
2537 intel_encoders_enable(state, crtc);
2539 /* prevents spurious underruns */
2540 if (DISPLAY_VER(dev_priv) == 2)
2541 intel_crtc_wait_for_next_vblank(crtc);
/*
 * Disable the GMCH panel fitter by clearing PFIT_CONTROL; only valid
 * while the transcoder is already off (asserted below).
 */
2544 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2546 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2547 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Nothing to disable if the old state had no pfit programmed. */
2549 if (!old_crtc_state->gmch_pfit.control)
2552 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2554 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2555 intel_de_read(dev_priv, PFIT_CONTROL));
2556 intel_de_write(dev_priv, PFIT_CONTROL, 0);
/*
 * Modeset disable sequence for i9xx/VLV/CHV CRTCs: encoders off,
 * vblank off, transcoder off, pfit off, then the platform-specific PLL
 * teardown (skipped for DSI, which owns its own clocking).
 */
2559 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2560 struct intel_crtc *crtc)
2562 struct intel_crtc_state *old_crtc_state =
2563 intel_atomic_get_old_crtc_state(state, crtc);
2564 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2565 enum pipe pipe = crtc->pipe;
2568 * On gen2 planes are double buffered but the pipe isn't, so we must
2569 * wait for planes to fully turn off before disabling the pipe.
2571 if (DISPLAY_VER(dev_priv) == 2)
2572 intel_crtc_wait_for_next_vblank(crtc);
2574 intel_encoders_disable(state, crtc);
2576 intel_crtc_vblank_off(old_crtc_state);
2578 intel_disable_transcoder(old_crtc_state);
2580 i9xx_pfit_disable(old_crtc_state);
2582 intel_encoders_post_disable(state, crtc);
/* DSI manages its own PLL; otherwise pick the platform PLL disable. */
2584 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2585 if (IS_CHERRYVIEW(dev_priv))
2586 chv_disable_pll(dev_priv, pipe);
2587 else if (IS_VALLEYVIEW(dev_priv))
2588 vlv_disable_pll(dev_priv, pipe);
2590 i9xx_disable_pll(old_crtc_state);
2593 intel_encoders_post_pll_disable(state, crtc);
2595 if (DISPLAY_VER(dev_priv) != 2)
2596 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2598 if (!dev_priv->wm_disp->initial_watermarks)
2599 intel_update_watermarks(dev_priv);
2601 /* clock the pipe down to 640x480@60 to potentially save power */
2602 if (IS_I830(dev_priv))
2603 i830_enable_pipe(dev_priv, pipe);
/*
 * Force a CRTC off outside the normal atomic commit path (used when
 * sanitizing inherited/bogus hw state at load or resume): disable its
 * planes, run the platform crtc_disable hook via a throwaway atomic
 * state, then scrub all software bookkeeping (crtc state, encoders,
 * watermarks, DPLL, power domains, cdclk/dbuf/bw global state).
 */
2606 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
2607 struct drm_modeset_acquire_ctx *ctx)
2609 struct intel_encoder *encoder;
2610 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2611 struct intel_bw_state *bw_state =
2612 to_intel_bw_state(dev_priv->bw_obj.state);
2613 struct intel_cdclk_state *cdclk_state =
2614 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
2615 struct intel_dbuf_state *dbuf_state =
2616 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
2617 struct intel_crtc_state *crtc_state =
2618 to_intel_crtc_state(crtc->base.state);
2619 struct intel_plane *plane;
2620 struct drm_atomic_state *state;
2621 struct intel_crtc_state *temp_crtc_state;
2622 enum pipe pipe = crtc->pipe;
2625 if (!crtc_state->hw.active)
/* Turn off every plane still visible on this CRTC first. */
2628 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
2629 const struct intel_plane_state *plane_state =
2630 to_intel_plane_state(plane->base.state);
2632 if (plane_state->uapi.visible)
2633 intel_plane_disable_noatomic(crtc, plane);
/* Build a minimal atomic state just to drive the disable hook. */
2636 state = drm_atomic_state_alloc(&dev_priv->drm);
2638 drm_dbg_kms(&dev_priv->drm,
2639 "failed to disable [CRTC:%d:%s], out of memory",
2640 crtc->base.base.id, crtc->base.name);
2644 state->acquire_ctx = ctx;
2646 /* Everything's already locked, -EDEADLK can't happen. */
2647 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
2648 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
2650 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
2652 dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
2654 drm_atomic_state_put(state);
2656 drm_dbg_kms(&dev_priv->drm,
2657 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
2658 crtc->base.base.id, crtc->base.name);
2660 crtc->active = false;
2661 crtc->base.enabled = false;
/* Clear the uapi/hw CRTC state so it reads as fully disabled. */
2663 drm_WARN_ON(&dev_priv->drm,
2664 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
2665 crtc_state->uapi.active = false;
2666 crtc_state->uapi.connector_mask = 0;
2667 crtc_state->uapi.encoder_mask = 0;
2668 intel_crtc_free_hw_state(crtc_state);
2669 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
2671 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
2672 encoder->base.crtc = NULL;
2674 intel_fbc_disable(crtc);
2675 intel_update_watermarks(dev_priv);
2676 intel_disable_shared_dpll(crtc_state);
2678 intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
/* Drop this pipe from the global cdclk/dbuf/bandwidth state. */
2680 cdclk_state->min_cdclk[pipe] = 0;
2681 cdclk_state->min_voltage_level[pipe] = 0;
2682 cdclk_state->active_pipes &= ~BIT(pipe);
2684 dbuf_state->active_pipes &= ~BIT(pipe);
2686 bw_state->data_rate[pipe] = 0;
2687 bw_state->num_active_planes[pipe] = 0;
2691 * turn all crtc's off, but do not adjust state
2692 * This has to be paired with a call to intel_modeset_setup_hw_state.
2694 int intel_display_suspend(struct drm_device *dev)
2696 struct drm_i915_private *dev_priv = to_i915(dev);
2697 struct drm_atomic_state *state;
/* Nothing to suspend on display-less SKUs. */
2700 if (!HAS_DISPLAY(dev_priv))
/* Suspend via the atomic helper and stash the state for resume. */
2703 state = drm_atomic_helper_suspend(dev);
2704 ret = PTR_ERR_OR_ZERO(state);
2706 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2709 dev_priv->modeset_restore_state = state;
/*
 * Default encoder destroy callback: clean up the drm encoder and free
 * the containing intel_encoder allocation.
 */
2713 void intel_encoder_destroy(struct drm_encoder *encoder)
2715 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2717 drm_encoder_cleanup(encoder);
2718 kfree(intel_encoder);
2721 /* Cross check the actual hw state with our own modeset state tracking (and it's
2722 * internal consistency). */
2723 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
2724 struct drm_connector_state *conn_state)
2726 struct intel_connector *connector = to_intel_connector(conn_state->connector);
2727 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2729 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
2730 connector->base.base.id, connector->base.name);
/* Connector reports enabled in hw: its crtc/encoder links must agree. */
2732 if (connector->get_hw_state(connector)) {
2733 struct intel_encoder *encoder = intel_attached_encoder(connector);
2735 I915_STATE_WARN(!crtc_state,
2736 "connector enabled without attached crtc\n");
2741 I915_STATE_WARN(!crtc_state->hw.active,
2742 "connector is active, but attached crtc isn't\n");
/* MST encoders have fan-out, so the 1:1 checks below don't apply. */
2744 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
2747 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
2748 "atomic encoder doesn't match attached encoder\n");
2750 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
2751 "attached encoder crtc differs from connector crtc\n");
/* Connector reports disabled: nothing should still point at a live crtc. */
2753 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
2754 "attached crtc is active, but connector isn't\n");
2755 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
2756 "best encoder set without crtc!\n");
/*
 * Check whether this CRTC state could use IPS (Intermediate Pixel
 * Storage): requires an IPS-capable crtc, the enable_ips modparam,
 * <= 24 bpp, and on BDW enough cdclk headroom (95% rule).
 */
2760 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
2762 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2763 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2765 /* IPS only exists on ULT machines and is tied to pipe A. */
2766 if (!hsw_crtc_supports_ips(crtc))
2769 if (!dev_priv->params.enable_ips)
2772 if (crtc_state->pipe_bpp > 24)
2776 * We compare against max which means we must take
2777 * the increased cdclk requirement into account when
2778 * calculating the new cdclk.
2780 * Should measure whether using a lower cdclk w/o IPS
2782 if (IS_BROADWELL(dev_priv) &&
2783 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
/*
 * Decide during atomic check whether IPS will be enabled for this CRTC
 * state; sets crtc_state->ips_enabled and may return an error when the
 * cdclk state lookup fails.
 */
2789 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
2791 struct drm_i915_private *dev_priv =
2792 to_i915(crtc_state->uapi.crtc->dev);
2793 struct intel_atomic_state *state =
2794 to_intel_atomic_state(crtc_state->uapi.state);
2796 crtc_state->ips_enabled = false;
2798 if (!hsw_crtc_state_ips_capable(crtc_state))
2802 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
2803 * enabled and disabled dynamically based on package C states,
2804 * user space can't make reliable use of the CRCs, so let's just
2805 * completely disable it.
2807 if (crtc_state->crc_enabled)
2810 /* IPS should be fine as long as at least one plane is enabled. */
2811 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
/* BDW: verify the 95%-of-cdclk pixel rate rule against the new cdclk. */
2814 if (IS_BROADWELL(dev_priv)) {
2815 const struct intel_cdclk_state *cdclk_state;
2817 cdclk_state = intel_atomic_get_cdclk_state(state);
2818 if (IS_ERR(cdclk_state))
2819 return PTR_ERR(cdclk_state);
2821 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
2822 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
2826 crtc_state->ips_enabled = true;
/*
 * Double-wide pipe mode is only available before gen4, and only on
 * pipe A except on i915g ("GDG"), where either pipe works.
 */
2831 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2833 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2835 /* GDG double wide on either pipe, otherwise pipe A only */
2836 return DISPLAY_VER(dev_priv) < 4 &&
2837 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
/*
 * Effective pipe pixel rate on ILK+: the dotclock, scaled up by the
 * panel-fitter downscaling ratio when the PCH pfit is active.
 */
2840 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2842 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2843 struct drm_rect src;
2846 * We only use IF-ID interlacing. If we ever use
2847 * PF-ID we'll need to adjust the pixel_rate here.
/* No pfit -> pixel rate is just the pipe dotclock. */
2850 if (!crtc_state->pch_pfit.enabled)
/* 16.16 fixed-point source rect for the scale-factor computation. */
2853 drm_rect_init(&src, 0, 0,
2854 crtc_state->pipe_src_w << 16,
2855 crtc_state->pipe_src_h << 16);
2857 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
/*
 * Populate the user-visible fields of @mode from the crtc_* (hardware)
 * timing fields of @timings, then regenerate the mode name.
 */
2861 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2862 const struct drm_display_mode *timings)
2864 mode->hdisplay = timings->crtc_hdisplay;
2865 mode->htotal = timings->crtc_htotal;
2866 mode->hsync_start = timings->crtc_hsync_start;
2867 mode->hsync_end = timings->crtc_hsync_end;
2869 mode->vdisplay = timings->crtc_vdisplay;
2870 mode->vtotal = timings->crtc_vtotal;
2871 mode->vsync_start = timings->crtc_vsync_start;
2872 mode->vsync_end = timings->crtc_vsync_end;
2874 mode->flags = timings->flags;
2875 mode->type = DRM_MODE_TYPE_DRIVER;
2877 mode->clock = timings->crtc_clock;
2879 drm_mode_set_name(mode);
/*
 * Cache the CRTC's effective pixel rate: plain pipe dotclock on GMCH
 * platforms, pfit-adjusted rate on ILK+.
 */
2882 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2884 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2886 if (HAS_GMCH(dev_priv))
2887 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2888 crtc_state->pixel_rate =
2889 crtc_state->hw.pipe_mode.crtc_clock;
2891 crtc_state->pixel_rate =
2892 ilk_pipe_pixel_rate(crtc_state);
/*
 * Derive hw.mode and hw.pipe_mode from the read-out adjusted_mode,
 * undoing bigjoiner pipe-halving and eDP MSO splitter segmenting, then
 * recompute the pixel rate.
 */
2895 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2897 struct drm_display_mode *mode = &crtc_state->hw.mode;
2898 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2899 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2901 drm_mode_copy(pipe_mode, adjusted_mode);
2903 if (crtc_state->bigjoiner) {
2905 * transcoder is programmed to the full mode,
2906 * but pipe timings are half of the transcoder mode
2908 pipe_mode->crtc_hdisplay /= 2;
2909 pipe_mode->crtc_hblank_start /= 2;
2910 pipe_mode->crtc_hblank_end /= 2;
2911 pipe_mode->crtc_hsync_start /= 2;
2912 pipe_mode->crtc_hsync_end /= 2;
2913 pipe_mode->crtc_htotal /= 2;
2914 pipe_mode->crtc_clock /= 2;
2917 if (crtc_state->splitter.enable) {
2918 int n = crtc_state->splitter.link_count;
2919 int overlap = crtc_state->splitter.pixel_overlap;
2922 * eDP MSO uses segment timings from EDID for transcoder
2923 * timings, but full mode for everything else.
2925 * h_full = (h_segment - pixel_overlap) * link_count
2927 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
2928 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
2929 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
2930 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
2931 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
2932 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
2933 pipe_mode->crtc_clock *= n;
/* Splitter case: adjusted_mode takes the expanded pipe timings. */
2935 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2936 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2938 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2939 intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
2942 intel_crtc_compute_pixel_rate(crtc_state);
/* hw.mode: user mode; hdisplay doubled again for bigjoiner pairs. */
2944 drm_mode_copy(mode, adjusted_mode);
2945 mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
2946 mode->vdisplay = crtc_state->pipe_src_h;
/*
 * Read out the encoder's hw config into @crtc_state and then derive the
 * dependent mode/pixel-rate state from it.
 */
2949 static void intel_encoder_get_config(struct intel_encoder *encoder,
2950 struct intel_crtc_state *crtc_state)
2952 encoder->get_config(encoder, crtc_state);
2954 intel_crtc_readout_derived_state(crtc_state);
/*
 * Atomic-check computation of derived CRTC config: build pipe_mode from
 * adjusted_mode (bigjoiner halving, MSO splitter expansion), validate
 * the dotclock against platform limits (enabling double-wide on old
 * GMCH parts when needed), enforce even pipe-source-width constraints,
 * compute the pixel rate, and hand off to FDI config for PCH encoders.
 */
2957 static int intel_crtc_compute_config(struct intel_crtc *crtc,
2958 struct intel_crtc_state *pipe_config)
2960 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2961 struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
2962 int clock_limit = dev_priv->max_dotclk_freq;
2964 drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
2966 /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
2967 if (pipe_config->bigjoiner) {
2968 pipe_mode->crtc_clock /= 2;
2969 pipe_mode->crtc_hdisplay /= 2;
2970 pipe_mode->crtc_hblank_start /= 2;
2971 pipe_mode->crtc_hblank_end /= 2;
2972 pipe_mode->crtc_hsync_start /= 2;
2973 pipe_mode->crtc_hsync_end /= 2;
2974 pipe_mode->crtc_htotal /= 2;
2975 pipe_config->pipe_src_w /= 2;
/* eDP MSO: expand segment timings to the full-link equivalent. */
2978 if (pipe_config->splitter.enable) {
2979 int n = pipe_config->splitter.link_count;
2980 int overlap = pipe_config->splitter.pixel_overlap;
2982 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
2983 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
2984 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
2985 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
2986 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
2987 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
2988 pipe_mode->crtc_clock *= n;
2991 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2993 if (DISPLAY_VER(dev_priv) < 4) {
2994 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
2997 * Enable double wide mode when the dot clock
2998 * is > 90% of the (display) core speed.
3000 if (intel_crtc_supports_double_wide(crtc) &&
3001 pipe_mode->crtc_clock > clock_limit) {
3002 clock_limit = dev_priv->max_dotclk_freq;
3003 pipe_config->double_wide = true;
/* Reject modes whose dotclock exceeds the (possibly raised) limit. */
3007 if (pipe_mode->crtc_clock > clock_limit) {
3008 drm_dbg_kms(&dev_priv->drm,
3009 "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
3010 pipe_mode->crtc_clock, clock_limit,
3011 yesno(pipe_config->double_wide));
3016 * Pipe horizontal size must be even in:
3018 * - LVDS dual channel mode
3019 * - Double wide pipe
3021 if (pipe_config->pipe_src_w & 1) {
3022 if (pipe_config->double_wide) {
3023 drm_dbg_kms(&dev_priv->drm,
3024 "Odd pipe source width not supported with double wide pipe\n");
3028 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
3029 intel_is_dual_link_lvds(dev_priv)) {
3030 drm_dbg_kms(&dev_priv->drm,
3031 "Odd pipe source width not supported with dual link LVDS\n");
3036 intel_crtc_compute_pixel_rate(pipe_config);
3038 if (pipe_config->has_pch_encoder)
3039 return ilk_fdi_compute_config(crtc, pipe_config);
/*
 * Scale the M/N pair down (preserving the ratio) until both values fit
 * in the DATA_LINK_M_N_MASK register field width.
 */
3045 intel_reduce_m_n_ratio(u32 *num, u32 *den)
3047 while (*num > DATA_LINK_M_N_MASK ||
3048 *den > DATA_LINK_M_N_MASK) {
/*
 * Compute a hardware M/N divider pair approximating m/n. N is either
 * the fixed DP constant (0x8000) for dongle compatibility or the next
 * power of two capped at DATA_LINK_N_MAX; M is scaled to match, then
 * both are reduced to fit the register field.
 */
3054 static void compute_m_n(unsigned int m, unsigned int n,
3055 u32 *ret_m, u32 *ret_n,
3059 * Several DP dongles in particular seem to be fussy about
3060 * too large link M/N values. Give N value as 0x8000 that
3061 * should be acceptable by specific devices. 0x8000 is the
3062 * specified fixed N value for asynchronous clock mode,
3063 * which the devices expect also in synchronous clock mode.
3066 *ret_n = DP_LINK_CONSTANT_N_VALUE;
3068 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
/* 64-bit multiply avoids overflow before the divide. */
3070 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
3071 intel_reduce_m_n_ratio(ret_m, ret_n);
/*
 * Fill @m_n with the data (gmch) and link M/N values for a DP link:
 * data M/N is data-rate / total link bandwidth (FEC-adjusted when
 * enabled), link M/N is pixel clock / link clock.
 */
3075 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
3076 int pixel_clock, int link_clock,
3077 struct intel_link_m_n *m_n,
3078 bool constant_n, bool fec_enable)
3080 u32 data_clock = bits_per_pixel * pixel_clock;
3083 data_clock = intel_dp_mode_to_fec_clock(data_clock);
3086 compute_m_n(data_clock,
3087 link_clock * nlanes * 8,
3088 &m_n->gmch_m, &m_n->gmch_n,
3091 compute_m_n(pixel_clock, link_clock,
3092 &m_n->link_m, &m_n->link_n,
/*
 * On IBX/CPT PCH platforms, override the VBT's LVDS SSC setting with
 * what the BIOS actually programmed, to avoid flicker from changing a
 * working configuration.
 */
3096 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
3099 * There may be no VBT; and if the BIOS enabled SSC we can
3100 * just keep using it to avoid unnecessary flicker. Whereas if the
3101 * BIOS isn't using it, don't assume it will work even if the VBT
3102 * indicates as much.
3104 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
3105 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
3109 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
3110 drm_dbg_kms(&dev_priv->drm,
3111 "SSC %s by BIOS, overriding VBT which says %s\n",
3112 enableddisabled(bios_lvds_use_ssc),
3113 enableddisabled(dev_priv->vbt.lvds_use_ssc));
3114 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
/*
 * Program the PCH transcoder data/link M1/N1 registers for this pipe
 * from the supplied M/N values (TU size packed into DATA_M1).
 */
3119 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3120 const struct intel_link_m_n *m_n)
3122 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3123 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3124 enum pipe pipe = crtc->pipe;
3126 intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
3127 TU_SIZE(m_n->tu) | m_n->gmch_m);
3128 intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
3129 intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
3130 intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
/*
 * Whether this transcoder exposes the secondary M2/N2 register set
 * (used for DRRS): only the eDP transcoder on HSW, otherwise gen7/CHV.
 */
3133 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
3134 enum transcoder transcoder)
3136 if (IS_HASWELL(dev_priv))
3137 return transcoder == TRANSCODER_EDP;
3140 * Strictly speaking some registers are available before
3141 * gen7, but we only support DRRS on gen7+
3143 return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
/*
 * Program the CPU transcoder data/link M/N registers. Gen5+ uses the
 * per-transcoder PIPE_*1 registers (plus the M2/N2 set for DRRS where
 * available); pre-ILK uses the per-pipe G4X register layout.
 */
3146 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3147 const struct intel_link_m_n *m_n,
3148 const struct intel_link_m_n *m2_n2)
3150 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3151 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3152 enum pipe pipe = crtc->pipe;
3153 enum transcoder transcoder = crtc_state->cpu_transcoder;
3155 if (DISPLAY_VER(dev_priv) >= 5) {
3156 intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
3157 TU_SIZE(m_n->tu) | m_n->gmch_m);
3158 intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
3160 intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
3162 intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
3165 * M2_N2 registers are set only if DRRS is supported
3166 * (to make sure the registers are not unnecessarily accessed).
3168 if (m2_n2 && crtc_state->has_drrs &&
3169 transcoder_has_m2_n2(dev_priv, transcoder)) {
3170 intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
3171 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
3172 intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
3174 intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
3176 intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
/* Pre-ILK: per-pipe G4X register layout. */
3180 intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
3181 TU_SIZE(m_n->tu) | m_n->gmch_m);
3182 intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
3183 intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
3184 intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2, the
 * latter for DRRS downclock) into the CPU transcoder, and the PCH
 * transcoder as well when a PCH encoder is in use.
 */
3188 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
3190 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
3191 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3194 dp_m_n = &crtc_state->dp_m_n;
3195 dp_m2_n2 = &crtc_state->dp_m2_n2;
3196 } else if (m_n == M2_N2) {
3199 * M2_N2 registers are not supported. Hence m2_n2 divider value
3200 * needs to be programmed into M1_N1.
3202 dp_m_n = &crtc_state->dp_m2_n2;
3204 drm_err(&i915->drm, "Unsupported divider value\n");
3208 if (crtc_state->has_pch_encoder)
3209 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
3211 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
/*
 * Write the CRTC's H/V timing registers (HTOTAL/HBLANK/HSYNC,
 * VTOTAL/VBLANK/VSYNC and VSYNCSHIFT) from the adjusted mode, with the
 * interlace halfline adjustments and the HSW eDP VTOTAL mirroring
 * workaround.
 */
3214 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
3216 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3217 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3218 enum pipe pipe = crtc->pipe;
3219 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3220 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
3221 u32 crtc_vtotal, crtc_vblank_end;
3224 /* We need to be careful not to changed the adjusted mode, for otherwise
3225 * the hw state checker will get angry at the mismatch. */
3226 crtc_vtotal = adjusted_mode->crtc_vtotal;
3227 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
3229 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3230 /* the chip adds 2 halflines automatically */
3232 crtc_vblank_end -= 1;
3234 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3235 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
3237 vsyncshift = adjusted_mode->crtc_hsync_start -
3238 adjusted_mode->crtc_htotal / 2;
3240 vsyncshift += adjusted_mode->crtc_htotal;
/* VSYNCSHIFT only exists on gen4+. */
3243 if (DISPLAY_VER(dev_priv) > 3)
3244 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
/* All timing registers pack (start-1) low and (end/total-1) high. */
3247 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
3248 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
3249 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
3250 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
3251 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
3252 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
3254 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
3255 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
3256 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
3257 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
3258 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
3259 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
3261 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
3262 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
3263 * documented on the DDI_FUNC_CTL register description, EDP Input Select
3265 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
3266 (pipe == PIPE_B || pipe == PIPE_C))
3267 intel_de_write(dev_priv, VTOTAL(pipe),
3268 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
3272 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
3274 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3275 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3276 enum pipe pipe = crtc->pipe;
3278 /* pipesrc controls the size that is scaled from, which should
3279 * always be the user's requested size.
3281 intel_de_write(dev_priv, PIPESRC(pipe),
3282 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
3285 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3287 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3288 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3290 if (DISPLAY_VER(dev_priv) == 2)
3293 if (DISPLAY_VER(dev_priv) >= 9 ||
3294 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3295 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3297 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
/*
 * intel_get_transcoder_timings - read the transcoder's timing registers
 * back into pipe_config->hw.adjusted_mode. All timing registers store
 * their values minus one, hence the "+ 1" on every readout below.
 */
3300 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
3301 struct intel_crtc_state *pipe_config)
3303 struct drm_device *dev = crtc->base.dev;
3304 struct drm_i915_private *dev_priv = to_i915(dev);
3305 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
/* Horizontal: low 16 bits hold hdisplay, high 16 bits hold htotal. */
3308 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
3309 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
3310 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
/* DSI transcoders have no blank/sync registers; skip the readout. */
3312 if (!transcoder_is_dsi(cpu_transcoder)) {
3313 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
3314 pipe_config->hw.adjusted_mode.crtc_hblank_start =
3316 pipe_config->hw.adjusted_mode.crtc_hblank_end =
3317 ((tmp >> 16) & 0xffff) + 1;
3319 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
3320 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
3321 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
/* Vertical timings, same layout as the horizontal registers. */
3323 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
3324 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
3325 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
3327 if (!transcoder_is_dsi(cpu_transcoder)) {
3328 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
3329 pipe_config->hw.adjusted_mode.crtc_vblank_start =
3331 pipe_config->hw.adjusted_mode.crtc_vblank_end =
3332 ((tmp >> 16) & 0xffff) + 1;
3334 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
3335 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
3336 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
/*
 * Interlaced modes: undo the adjustment made on programming (the hw
 * accounts for the extra (half)lines itself), mirroring the write path.
 */
3338 if (intel_pipe_is_interlaced(pipe_config)) {
3339 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
3340 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
3341 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
3345 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3346 struct intel_crtc_state *pipe_config)
3348 struct drm_device *dev = crtc->base.dev;
3349 struct drm_i915_private *dev_priv = to_i915(dev);
3352 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3353 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
3354 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
/*
 * i9xx_set_pipeconf - program PIPECONF for gmch platforms (pre-ILK,
 * VLV/CHV): double wide, dither/bpc, interlace mode, color range,
 * gamma mode and frame start delay.
 */
3357 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3359 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3360 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3365 /* we keep both pipes enabled on 830 */
3366 if (IS_I830(dev_priv))
3367 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
3369 if (crtc_state->double_wide)
3370 pipeconf |= PIPECONF_DOUBLE_WIDE;
3372 /* only g4x and later have fancy bpc/dither controls */
3373 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3374 IS_CHERRYVIEW(dev_priv)) {
3375 /* Bspec claims that we can't use dithering for 30bpp pipes. */
3376 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3377 pipeconf |= PIPECONF_DITHER_EN |
3378 PIPECONF_DITHER_TYPE_SP;
3380 switch (crtc_state->pipe_bpp) {
3382 pipeconf |= PIPECONF_6BPC;
3385 pipeconf |= PIPECONF_8BPC;
3388 pipeconf |= PIPECONF_10BPC;
3391 /* Case prevented by intel_choose_pipe_bpp_dither. */
/* Pre-gen4 and SDVO outputs need the field-indication interlace variant. */
3396 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3397 if (DISPLAY_VER(dev_priv) < 4 ||
3398 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3399 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3401 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3403 pipeconf |= PIPECONF_PROGRESSIVE;
/* Limited color range in PIPECONF exists only on VLV/CHV here. */
3406 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3407 crtc_state->limited_color_range)
3408 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3410 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
/* Frame start delay is programmed as value - 1. */
3412 pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3414 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
/* Posting read to flush the write before subsequent pipe programming. */
3415 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
3418 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3420 if (IS_I830(dev_priv))
3423 return DISPLAY_VER(dev_priv) >= 4 ||
3424 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
/*
 * i9xx_get_pfit_config - read back the gmch panel fitter state into
 * crtc_state->gmch_pfit, but only if the fitter exists, is enabled,
 * and is attached to this crtc's pipe.
 */
3427 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3429 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3430 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3433 if (!i9xx_has_pfit(dev_priv))
3436 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3437 if (!(tmp & PFIT_ENABLE))
3440 /* Check whether the pfit is attached to our pipe. */
/* Pre-gen4 the fitter is hardwired to pipe B; later it has a pipe select field. */
3441 if (DISPLAY_VER(dev_priv) < 4) {
3442 if (crtc->pipe != PIPE_B)
3445 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3449 crtc_state->gmch_pfit.control = tmp;
3450 crtc_state->gmch_pfit.pgm_ratios =
3451 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
/*
 * vlv_crtc_clock_get - reconstruct the port clock from the VLV DPLL
 * divider fields read over DPIO (ref clock is the fixed 100 MHz DPIO ref).
 */
3454 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3455 struct intel_crtc_state *pipe_config)
3457 struct drm_device *dev = crtc->base.dev;
3458 struct drm_i915_private *dev_priv = to_i915(dev);
3459 enum pipe pipe = crtc->pipe;
3462 int refclk = 100000;
3464 /* In case of DSI, DPLL will not be used */
3465 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
/* DPIO access must be bracketed by get/put of the sideband lock. */
3468 vlv_dpio_get(dev_priv);
3469 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3470 vlv_dpio_put(dev_priv);
/* Unpack the M1/M2/N/P1/P2 divider fields from the single mdiv word. */
3472 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3473 clock.m2 = mdiv & DPIO_M2DIV_MASK;
3474 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3475 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3476 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3478 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
/*
 * chv_crtc_clock_get - reconstruct the port clock from the CHV PLL
 * dividers read over DPIO. Unlike VLV the dividers are spread over
 * several PLL/CMN dwords and M2 has an optional fractional part.
 */
3481 static void chv_crtc_clock_get(struct intel_crtc *crtc,
3482 struct intel_crtc_state *pipe_config)
3484 struct drm_device *dev = crtc->base.dev;
3485 struct drm_i915_private *dev_priv = to_i915(dev);
3486 enum pipe pipe = crtc->pipe;
3487 enum dpio_channel port = vlv_pipe_to_channel(pipe);
3489 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
3490 int refclk = 100000;
3492 /* In case of DSI, DPLL will not be used */
3493 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3496 vlv_dpio_get(dev_priv);
3497 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
3498 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
3499 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
3500 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
3501 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
3502 vlv_dpio_put(dev_priv);
/* M2 integer part sits in bits 7:0 shifted up to make room for the 22-bit fraction. */
3504 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
3505 clock.m2 = (pll_dw0 & 0xff) << 22;
3506 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
3507 clock.m2 |= pll_dw2 & 0x3fffff;
3508 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
3509 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
3510 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
3512 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
/*
 * bdw_get_pipemisc_output_format - derive the pipe's output format
 * (RGB / YCbCr 4:4:4 / YCbCr 4:2:0) from the PIPEMISC register.
 */
3515 static enum intel_output_format
3516 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3518 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3521 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3523 if (tmp & PIPEMISC_YUV420_ENABLE) {
3524 /* We support 4:2:0 in full blend mode only */
3525 drm_WARN_ON(&dev_priv->drm,
3526 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3528 return INTEL_OUTPUT_FORMAT_YCBCR420;
3529 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3530 return INTEL_OUTPUT_FORMAT_YCBCR444;
/* Neither YUV bit set -> plain RGB output. */
3532 return INTEL_OUTPUT_FORMAT_RGB;
3536 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3538 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3539 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3540 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3541 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3544 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3546 if (tmp & DISPPLANE_GAMMA_ENABLE)
3547 crtc_state->gamma_enable = true;
3549 if (!HAS_GMCH(dev_priv) &&
3550 tmp & DISPPLANE_PIPE_CSC_ENABLE)
3551 crtc_state->csc_enable = true;
/*
 * i9xx_get_pipe_config - read back the full pipe state for gmch
 * platforms. Returns true iff the pipe is active; the whole readout
 * runs under a conditional power reference on the pipe's domain.
 */
3554 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3555 struct intel_crtc_state *pipe_config)
3557 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3558 enum intel_display_power_domain power_domain;
3559 intel_wakeref_t wakeref;
/* Bail (pipe off) if the power well backing this pipe is down. */
3563 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3564 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3568 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3569 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3570 pipe_config->shared_dpll = NULL;
3574 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3575 if (!(tmp & PIPECONF_ENABLE))
/* Only g4x and later encode bpc in PIPECONF. */
3578 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3579 IS_CHERRYVIEW(dev_priv)) {
3580 switch (tmp & PIPECONF_BPC_MASK) {
3582 pipe_config->pipe_bpp = 18;
3585 pipe_config->pipe_bpp = 24;
3587 case PIPECONF_10BPC:
3588 pipe_config->pipe_bpp = 30;
3595 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3596 (tmp & PIPECONF_COLOR_RANGE_SELECT))
3597 pipe_config->limited_color_range = true;
3599 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
3600 PIPECONF_GAMMA_MODE_SHIFT;
3602 if (IS_CHERRYVIEW(dev_priv))
3603 pipe_config->cgm_mode = intel_de_read(dev_priv,
3604 CGM_PIPE_MODE(crtc->pipe));
3606 i9xx_get_pipe_color_config(pipe_config);
3607 intel_color_get_config(pipe_config);
3609 if (DISPLAY_VER(dev_priv) < 4)
3610 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
3612 intel_get_transcoder_timings(crtc, pipe_config);
3613 intel_get_pipe_src_size(crtc, pipe_config);
3615 i9xx_get_pfit_config(pipe_config);
/* Pixel multiplier readout location depends on the platform generation. */
3617 if (DISPLAY_VER(dev_priv) >= 4) {
3618 /* No way to read it out on pipes B and C */
3619 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
3620 tmp = dev_priv->chv_dpll_md[crtc->pipe];
3622 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
3623 pipe_config->pixel_multiplier =
3624 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3625 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3626 pipe_config->dpll_hw_state.dpll_md = tmp;
3627 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3628 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3629 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
3630 pipe_config->pixel_multiplier =
3631 ((tmp & SDVO_MULTIPLIER_MASK)
3632 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3634 /* Note that on i915G/GM the pixel multiplier is in the sdvo
3635 * port and will be fixed up in the encoder->get_config
3637 pipe_config->pixel_multiplier = 1;
3639 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
3641 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
3642 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
3644 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
3647 /* Mask out read-only status bits. */
3648 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
3649 DPLL_PORTC_READY_MASK |
3650 DPLL_PORTB_READY_MASK);
3653 if (IS_CHERRYVIEW(dev_priv))
3654 chv_crtc_clock_get(crtc, pipe_config);
3655 else if (IS_VALLEYVIEW(dev_priv))
3656 vlv_crtc_clock_get(crtc, pipe_config);
3658 i9xx_crtc_clock_get(crtc, pipe_config);
3661 * Normally the dotclock is filled in by the encoder .get_config()
3662 * but in case the pipe is enabled w/o any ports we need a sane
3665 pipe_config->hw.adjusted_mode.crtc_clock =
3666 pipe_config->port_clock / pipe_config->pixel_multiplier;
3671 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * ilk_set_pipeconf - program PIPECONF on ILK-IVB: bpc, dithering,
 * interlace mode, color range/space, gamma mode and frame start delay.
 */
3676 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3678 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3679 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3680 enum pipe pipe = crtc->pipe;
3685 switch (crtc_state->pipe_bpp) {
3687 val |= PIPECONF_6BPC;
3690 val |= PIPECONF_8BPC;
3693 val |= PIPECONF_10BPC;
3696 val |= PIPECONF_12BPC;
3699 /* Case prevented by intel_choose_pipe_bpp_dither. */
3703 if (crtc_state->dither)
3704 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3706 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3707 val |= PIPECONF_INTERLACED_ILK;
3709 val |= PIPECONF_PROGRESSIVE;
/* Limited range must not be combined with a YCbCr output format here. */
3712 * This would end up with an odd purple hue over
3713 * the entire display. Make sure we don't do it.
3715 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3716 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3718 if (crtc_state->limited_color_range &&
3719 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3720 val |= PIPECONF_COLOR_RANGE_SELECT;
3722 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3723 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
3725 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
/* Frame start delay is programmed as value - 1. */
3727 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3729 intel_de_write(dev_priv, PIPECONF(pipe), val);
/* Posting read flushes the write. */
3730 intel_de_posting_read(dev_priv, PIPECONF(pipe));
3733 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3735 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3736 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3737 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3740 if (IS_HASWELL(dev_priv) && crtc_state->dither)
3741 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3743 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3744 val |= PIPECONF_INTERLACED_ILK;
3746 val |= PIPECONF_PROGRESSIVE;
3748 if (IS_HASWELL(dev_priv) &&
3749 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3750 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3752 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3753 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
/*
 * bdw_set_pipemisc - program PIPEMISC on BDW+: bpc, dithering, YUV
 * output mode, HDR precision, pixel rounding, and (ADL-P) the
 * underrun bubble counter depending on scaler usage.
 */
3756 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
3758 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3759 const struct intel_crtc_scaler_state *scaler_state =
3760 &crtc_state->scaler_state;
3762 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3766 switch (crtc_state->pipe_bpp) {
3768 val |= PIPEMISC_6_BPC;
3771 val |= PIPEMISC_8_BPC;
3774 val |= PIPEMISC_10_BPC;
3777 /* Port output 12BPC defined for ADLP+ */
3778 if (DISPLAY_VER(dev_priv) > 12)
3779 val |= PIPEMISC_12_BPC_ADLP;
3782 MISSING_CASE(crtc_state->pipe_bpp);
3786 if (crtc_state->dither)
3787 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
3789 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3790 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3791 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
/* 4:2:0 is only supported in full blend mode. */
3793 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3794 val |= PIPEMISC_YUV420_ENABLE |
3795 PIPEMISC_YUV420_MODE_FULL_BLEND;
3797 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3798 val |= PIPEMISC_HDR_MODE_PRECISION;
3800 if (DISPLAY_VER(dev_priv) >= 12)
3801 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
/* ADL-P: switch the underrun bubble counter based on scaler usage. */
3803 if (IS_ALDERLAKE_P(dev_priv)) {
3804 bool scaler_in_use = false;
3806 for (i = 0; i < crtc->num_scalers; i++) {
3807 if (!scaler_state->scalers[i].in_use)
3810 scaler_in_use = true;
3814 intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
3815 PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
3816 scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
3817 PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
3820 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
/*
 * bdw_get_pipemisc_bpp - read back the pipe bpp from the PIPEMISC
 * bpc field (returns bits-per-pixel, i.e. 3x the bpc value).
 */
3823 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
3825 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3828 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3830 switch (tmp & PIPEMISC_BPC_MASK) {
3831 case PIPEMISC_6_BPC:
3833 case PIPEMISC_8_BPC:
3835 case PIPEMISC_10_BPC:
3838 * PORT OUTPUT 12 BPC defined for ADLP+.
3841 * For previous platforms with DSI interface, bits 5:7
3842 * are used for storing pipe_bpp irrespective of dithering.
3843 * Since the value of 12 BPC is not defined for these bits
3844 * on older platforms, need to find a workaround for 12 BPC
3845 * MIPI DSI HW readout.
3847 case PIPEMISC_12_BPC_ADLP:
/* The 12 BPC encoding is only valid on display version > 12 (ADL-P+). */
3848 if (DISPLAY_VER(dev_priv) > 12)
3857 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3860 * Account for spread spectrum to avoid
3861 * oversubscribing the link. Max center spread
3862 * is 2.5%; use 5% for safety's sake.
3864 u32 bps = target_clock * bpp * 21 / 20;
3865 return DIV_ROUND_UP(bps, link_bw * 8);
3868 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
3869 struct intel_link_m_n *m_n)
3871 struct drm_device *dev = crtc->base.dev;
3872 struct drm_i915_private *dev_priv = to_i915(dev);
3873 enum pipe pipe = crtc->pipe;
3875 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
3876 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
3877 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
3879 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
3880 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
3881 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/*
 * intel_cpu_transcoder_get_m_n - read back the CPU transcoder link/data
 * M/N values (and the alternate M2/N2 set where the transcoder has one).
 * Pre-ILK platforms use the per-pipe G4X register layout instead.
 * The TU size fields are stored as value - 1, hence the "+ 1".
 */
3884 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
3885 enum transcoder transcoder,
3886 struct intel_link_m_n *m_n,
3887 struct intel_link_m_n *m2_n2)
3889 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3890 enum pipe pipe = crtc->pipe;
3892 if (DISPLAY_VER(dev_priv) >= 5) {
3893 m_n->link_m = intel_de_read(dev_priv,
3894 PIPE_LINK_M1(transcoder));
3895 m_n->link_n = intel_de_read(dev_priv,
3896 PIPE_LINK_N1(transcoder));
3897 m_n->gmch_m = intel_de_read(dev_priv,
3898 PIPE_DATA_M1(transcoder))
3900 m_n->gmch_n = intel_de_read(dev_priv,
3901 PIPE_DATA_N1(transcoder));
3902 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
3903 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/* Optional second M/N set, only on transcoders that support it. */
3905 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
3906 m2_n2->link_m = intel_de_read(dev_priv,
3907 PIPE_LINK_M2(transcoder));
3908 m2_n2->link_n = intel_de_read(dev_priv,
3909 PIPE_LINK_N2(transcoder));
3910 m2_n2->gmch_m = intel_de_read(dev_priv,
3911 PIPE_DATA_M2(transcoder))
3913 m2_n2->gmch_n = intel_de_read(dev_priv,
3914 PIPE_DATA_N2(transcoder));
3915 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
3916 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/* Pre-ILK: per-pipe G4X registers, no second M/N set. */
3919 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
3920 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
3921 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3923 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
3924 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3925 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3929 void intel_dp_get_m_n(struct intel_crtc *crtc,
3930 struct intel_crtc_state *pipe_config)
3932 if (pipe_config->has_pch_encoder)
3933 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
3935 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3936 &pipe_config->dp_m_n,
3937 &pipe_config->dp_m2_n2);
3940 void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
3941 struct intel_crtc_state *pipe_config)
3943 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3944 &pipe_config->fdi_m_n, NULL);
3947 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3950 drm_rect_init(&crtc_state->pch_pfit.dst,
3951 pos >> 16, pos & 0xffff,
3952 size >> 16, size & 0xffff);
/*
 * skl_get_pfit_config - find the pipe scaler (if any) used for panel
 * fitting on SKL+ and read back its window position/size; updates the
 * scaler state's in_use/scaler_id/scaler_users bookkeeping to match.
 */
3955 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
3957 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3958 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3959 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
3963 /* find scaler attached to this pipe */
3964 for (i = 0; i < crtc->num_scalers; i++) {
/* Only a scaler that is enabled AND bound to the pipe (not a plane) counts. */
3967 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
3968 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
3972 crtc_state->pch_pfit.enabled = true;
3974 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
3975 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
3977 ilk_get_pfit_pos_size(crtc_state, pos, size);
3979 scaler_state->scalers[i].in_use = true;
3983 scaler_state->scaler_id = id;
/* Mark/unmark the crtc as a scaler user depending on whether one was found. */
3985 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
3987 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
/*
 * ilk_get_pfit_config - read back the ILK-IVB panel fitter state
 * (enable + window pos/size) into crtc_state->pch_pfit.
 */
3990 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3992 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3993 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3996 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3997 if ((ctl & PF_ENABLE) == 0)
4000 crtc_state->pch_pfit.enabled = true;
4002 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
4003 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
4005 ilk_get_pfit_pos_size(crtc_state, pos, size);
/* IVB (display v7) pfits are pipe-selectable; warn on a mismatch. */
4008 * We currently do not free assignements of panel fitters on
4009 * ivb/hsw (since we don't use the higher upscaling modes which
4010 * differentiates them) so just WARN about this case for now.
4012 drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
4013 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
/*
 * ilk_get_pipe_config - read back the full pipe state on ILK-IVB.
 * Returns true iff the pipe is active; runs under a conditional
 * power reference on the pipe's power domain.
 */
4016 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
4017 struct intel_crtc_state *pipe_config)
4019 struct drm_device *dev = crtc->base.dev;
4020 struct drm_i915_private *dev_priv = to_i915(dev);
4021 enum intel_display_power_domain power_domain;
4022 intel_wakeref_t wakeref;
4026 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
4027 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4031 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4032 pipe_config->shared_dpll = NULL;
4035 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4036 if (!(tmp & PIPECONF_ENABLE))
/* Decode the bpc field into bits per pixel (3 channels). */
4039 switch (tmp & PIPECONF_BPC_MASK) {
4041 pipe_config->pipe_bpp = 18;
4044 pipe_config->pipe_bpp = 24;
4046 case PIPECONF_10BPC:
4047 pipe_config->pipe_bpp = 30;
4049 case PIPECONF_12BPC:
4050 pipe_config->pipe_bpp = 36;
4056 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
4057 pipe_config->limited_color_range = true;
/* Both YUV601 and YUV709 read back as YCbCr 4:4:4 output. */
4059 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
4060 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
4061 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
4062 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4065 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4069 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
4070 PIPECONF_GAMMA_MODE_SHIFT;
4072 pipe_config->csc_mode = intel_de_read(dev_priv,
4073 PIPE_CSC_MODE(crtc->pipe));
4075 i9xx_get_pipe_color_config(pipe_config);
4076 intel_color_get_config(pipe_config);
4078 pipe_config->pixel_multiplier = 1;
4080 ilk_pch_get_config(pipe_config);
4082 intel_get_transcoder_timings(crtc, pipe_config);
4083 intel_get_pipe_src_size(crtc, pipe_config);
4085 ilk_get_pfit_config(pipe_config);
4090 intel_display_power_put(dev_priv, power_domain, wakeref);
4095 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
4097 if (DISPLAY_VER(i915) >= 12)
4098 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
4099 else if (DISPLAY_VER(i915) >= 11)
4100 return BIT(PIPE_B) | BIT(PIPE_C);
4105 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
4106 enum transcoder cpu_transcoder)
4108 enum intel_display_power_domain power_domain;
4109 intel_wakeref_t wakeref;
4112 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4114 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4115 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4117 return tmp & TRANS_DDI_FUNC_ENABLE;
/*
 * enabled_bigjoiner_pipes - scan all crtcs and return the set of pipes
 * currently acting as bigjoiner (or, on v13+, uncompressed joiner)
 * slaves. Master state is tracked too, for the consistency WARN.
 */
4120 static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
4122 u8 master_pipes = 0, slave_pipes = 0;
4123 struct intel_crtc *crtc;
4125 for_each_intel_crtc(&dev_priv->drm, crtc) {
4126 enum intel_display_power_domain power_domain;
4127 enum pipe pipe = crtc->pipe;
4128 intel_wakeref_t wakeref;
/* Skip pipes that cannot participate in bigjoiner on this platform. */
4130 if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
4133 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
4134 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4135 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4137 if (!(tmp & BIG_JOINER_ENABLE))
4140 if (tmp & MASTER_BIG_JOINER_ENABLE)
4141 master_pipes |= BIT(pipe);
4143 slave_pipes |= BIT(pipe);
/* Uncompressed joiner readout only exists on display version 13+. */
4146 if (DISPLAY_VER(dev_priv) < 13)
4149 power_domain = POWER_DOMAIN_PIPE(pipe);
4150 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4151 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4153 if (tmp & UNCOMPRESSED_JOINER_MASTER)
4154 master_pipes |= BIT(pipe);
4155 if (tmp & UNCOMPRESSED_JOINER_SLAVE)
4156 slave_pipes |= BIT(pipe);
4160 /* Bigjoiner pipes should always be consecutive master and slave */
4161 drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
4162 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
4163 master_pipes, slave_pipes);
4168 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
4170 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4172 if (DISPLAY_VER(i915) >= 11)
4173 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4175 return panel_transcoder_mask;
/*
 * hsw_enabled_transcoders - return the bitmask of transcoders currently
 * driving this crtc's pipe: panel transcoders (eDP/DSI) routed to the
 * pipe, the pipe's own transcoder, and — for a bigjoiner slave — the
 * master pipe's transcoder.
 */
4178 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
4180 struct drm_device *dev = crtc->base.dev;
4181 struct drm_i915_private *dev_priv = to_i915(dev);
4182 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
4183 enum transcoder cpu_transcoder;
4184 u8 enabled_transcoders = 0;
4187 * XXX: Do intel_display_power_get_if_enabled before reading this (for
4188 * consistency and less surprising code; it's in always on power).
4190 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
4191 panel_transcoder_mask) {
4192 enum intel_display_power_domain power_domain;
4193 intel_wakeref_t wakeref;
4194 enum pipe trans_pipe;
4197 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4198 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4199 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4201 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
/* Map the DDI_FUNC_CTL EDP input select field back to a pipe. */
4204 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
4207 "unknown pipe linked to transcoder %s\n",
4208 transcoder_name(cpu_transcoder));
4210 case TRANS_DDI_EDP_INPUT_A_ONOFF:
4211 case TRANS_DDI_EDP_INPUT_A_ON:
4212 trans_pipe = PIPE_A;
4214 case TRANS_DDI_EDP_INPUT_B_ONOFF:
4215 trans_pipe = PIPE_B;
4217 case TRANS_DDI_EDP_INPUT_C_ONOFF:
4218 trans_pipe = PIPE_C;
4220 case TRANS_DDI_EDP_INPUT_D_ONOFF:
4221 trans_pipe = PIPE_D;
4225 if (trans_pipe == crtc->pipe)
4226 enabled_transcoders |= BIT(cpu_transcoder);
4229 /* single pipe or bigjoiner master */
4230 cpu_transcoder = (enum transcoder) crtc->pipe;
4231 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4232 enabled_transcoders |= BIT(cpu_transcoder);
4234 /* bigjoiner slave -> consider the master pipe's transcoder as well */
4235 if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
4236 cpu_transcoder = (enum transcoder) crtc->pipe - 1;
4237 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4238 enabled_transcoders |= BIT(cpu_transcoder);
4241 return enabled_transcoders;
4244 static bool has_edp_transcoders(u8 enabled_transcoders)
4246 return enabled_transcoders & BIT(TRANSCODER_EDP);
4249 static bool has_dsi_transcoders(u8 enabled_transcoders)
4251 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4252 BIT(TRANSCODER_DSI_1));
4255 static bool has_pipe_transcoders(u8 enabled_transcoders)
4257 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4258 BIT(TRANSCODER_DSI_0) |
4259 BIT(TRANSCODER_DSI_1));
4262 static void assert_enabled_transcoders(struct drm_i915_private *i915,
4263 u8 enabled_transcoders)
4265 /* Only one type of transcoder please */
4266 drm_WARN_ON(&i915->drm,
4267 has_edp_transcoders(enabled_transcoders) +
4268 has_dsi_transcoders(enabled_transcoders) +
4269 has_pipe_transcoders(enabled_transcoders) > 1);
4271 /* Only DSI transcoders can be ganged */
4272 drm_WARN_ON(&i915->drm,
4273 !has_dsi_transcoders(enabled_transcoders) &&
4274 !is_power_of_2(enabled_transcoders));
/*
 * hsw_get_transcoder_state - determine which transcoder drives this crtc
 * and whether it is active. Fills pipe_config->cpu_transcoder, grabs a
 * power reference on it into power_domain_set, and returns the PIPECONF
 * enable state.
 */
4277 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
4278 struct intel_crtc_state *pipe_config,
4279 struct intel_display_power_domain_set *power_domain_set)
4281 struct drm_device *dev = crtc->base.dev;
4282 struct drm_i915_private *dev_priv = to_i915(dev);
4283 unsigned long enabled_transcoders;
4286 enabled_transcoders = hsw_enabled_transcoders(crtc);
4287 if (!enabled_transcoders)
4290 assert_enabled_transcoders(dev_priv, enabled_transcoders);
4293 * With the exception of DSI we should only ever have
4294 * a single enabled transcoder. With DSI let's just
4295 * pick the first one.
4297 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
4299 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4300 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
/* eDP input A_ONOFF means the pfit power-well workaround must stay on. */
4303 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
4304 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4306 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
4307 pipe_config->pch_pfit.force_thru = true;
4310 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
4312 return tmp & PIPECONF_ENABLE;
/*
 * bxt_get_dsi_transcoder_state - probe the BXT/GLK DSI ports to find a
 * DSI transcoder routed to this crtc's pipe; grabs a power reference
 * for it into power_domain_set. Returns true iff a DSI transcoder was
 * found and selected.
 */
4315 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
4316 struct intel_crtc_state *pipe_config,
4317 struct intel_display_power_domain_set *power_domain_set)
4319 struct drm_device *dev = crtc->base.dev;
4320 struct drm_i915_private *dev_priv = to_i915(dev);
4321 enum transcoder cpu_transcoder;
/* Port A maps to DSI transcoder A, port C to DSI transcoder C. */
4325 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
4327 cpu_transcoder = TRANSCODER_DSI_A;
4329 cpu_transcoder = TRANSCODER_DSI_C;
4331 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4332 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
4336 * The PLL needs to be enabled with a valid divider
4337 * configuration, otherwise accessing DSI registers will hang
4338 * the machine. See BSpec North Display Engine
4339 * registers/MIPI[BXT]. We can break out here early, since we
4340 * need the same DSI PLL to be enabled for both DSI ports.
4342 if (!bxt_dsi_pll_is_enabled(dev_priv))
4345 /* XXX: this works for video mode only */
4346 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port))
4347 if (!(tmp & DPI_ENABLE))
/* Only accept the port if its MIPI_CTRL pipe select matches our pipe. */
4350 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
4351 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
4354 pipe_config->cpu_transcoder = cpu_transcoder;
4358 return transcoder_is_dsi(pipe_config->cpu_transcoder);
/*
 * HSW+ pipe readout: reconstruct pipe_config from hardware state.
 * Acquires all power domains it touches into a local power-domain set and
 * releases them before returning; returns whether the pipe is active.
 *
 * NOTE(review): intermediate lines are elided from this listing (early
 * returns, else branches, closing braces); comments annotate visible code.
 */
4361 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
4362 struct intel_crtc_state *pipe_config)
4364 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4365 struct intel_display_power_domain_set power_domain_set = { };
/* bail out immediately if the pipe power well is down */
4369 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4370 POWER_DOMAIN_PIPE(crtc->pipe)))
4373 pipe_config->shared_dpll = NULL;
4375 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
/* BXT/GLK: a DSI transcoder may drive the pipe instead of a DDI one */
4377 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4378 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
4379 drm_WARN_ON(&dev_priv->drm, active);
4383 intel_dsc_get_config(pipe_config);
4384 if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
4385 intel_uncompressed_joiner_get_config(pipe_config);
/* timings readout; DSI transcoders only support this on gen11+ */
4390 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
4391 DISPLAY_VER(dev_priv) >= 11)
4392 intel_get_transcoder_timings(crtc, pipe_config);
4394 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
4395 intel_vrr_get_config(crtc, pipe_config);
4397 intel_get_pipe_src_size(crtc, pipe_config);
/* output colorspace lives in PIPECONF on HSW, PIPEMISC on BDW+ */
4399 if (IS_HASWELL(dev_priv)) {
4400 u32 tmp = intel_de_read(dev_priv,
4401 PIPECONF(pipe_config->cpu_transcoder));
4403 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
4404 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4406 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4408 pipe_config->output_format =
4409 bdw_get_pipemisc_output_format(crtc);
4412 pipe_config->gamma_mode = intel_de_read(dev_priv,
4413 GAMMA_MODE(crtc->pipe));
4415 pipe_config->csc_mode = intel_de_read(dev_priv,
4416 PIPE_CSC_MODE(crtc->pipe));
/* gamma/csc enable bits moved into SKL_BOTTOM_COLOR on gen9+ */
4418 if (DISPLAY_VER(dev_priv) >= 9) {
4419 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
4421 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
4422 pipe_config->gamma_enable = true;
4424 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
4425 pipe_config->csc_enable = true;
4427 i9xx_get_pipe_color_config(pipe_config);
4430 intel_color_get_config(pipe_config);
4432 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
4433 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
4434 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4435 pipe_config->ips_linetime =
4436 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
/* panel fitter state only readable when its power well is up */
4438 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4439 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
4440 if (DISPLAY_VER(dev_priv) >= 9)
4441 skl_get_pfit_config(pipe_config);
4443 ilk_get_pfit_config(pipe_config);
4446 if (hsw_crtc_supports_ips(crtc)) {
4447 if (IS_HASWELL(dev_priv))
4448 pipe_config->ips_enabled = intel_de_read(dev_priv,
4449 IPS_CTL) & IPS_ENABLE;
4452 * We cannot readout IPS state on broadwell, set to
4453 * true so we can set it to a defined state on first
4456 pipe_config->ips_enabled = true;
/* EDP and DSI transcoders have no pixel multiplier register */
4460 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
4461 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4462 pipe_config->pixel_multiplier =
4463 intel_de_read(dev_priv,
4464 PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
4466 pipe_config->pixel_multiplier = 1;
/* drop every power-domain reference taken during readout */
4470 intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
4475 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4477 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4478 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4480 if (!i915->display->get_pipe_config(crtc, crtc_state))
4483 crtc_state->hw.active = true;
4485 intel_crtc_readout_derived_state(crtc_state);
4490 /* VESA 640x480x72Hz mode to set on the pipe */
/*
 * Fixed mode used for load-detect (e.g. CRT presence probing): a small,
 * universally supported timing so any connector can be lit briefly.
 * DRM_MODE() args: name, type, clock(kHz), then h/v timing values.
 */
4491 static const struct drm_display_mode load_detect_mode = {
4492 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
4493 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
/*
 * Add every plane on @crtc to @state and detach it (no crtc, no fb),
 * so a subsequent commit turns all of the crtc's planes off.
 * Returns 0 or a negative errno from the atomic helpers.
 *
 * NOTE(review): error-return lines and the loop's `continue` are elided
 * from this listing.
 */
4496 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4497 struct drm_crtc *crtc)
4499 struct drm_plane *plane;
4500 struct drm_plane_state *plane_state;
4503 ret = drm_atomic_add_affected_planes(state, crtc);
4507 for_each_new_plane_in_state(state, plane, plane_state, i) {
/* only touch planes that are currently on this crtc */
4508 if (plane_state->crtc != crtc)
4511 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4515 drm_atomic_set_fb_for_plane(plane_state, NULL);
/*
 * Light up a pipe on @connector with the fixed load_detect_mode so the
 * encoder can probe for a sink (classic CRT load detection).  Builds two
 * atomic states: @state (the probe configuration, committed here) and
 * @restore_state (a copy of the pre-probe state, stashed in old->restore_state
 * for intel_release_load_detect_pipe() to commit later).
 *
 * Returns >0 when a probe pipe was lit, 0/negative errno otherwise
 * (per the visible call sites; exact return lines are elided here).
 *
 * NOTE(review): this listing elides many lines (gotos, labels, error
 * branches); comments annotate only the visible code.
 */
4521 int intel_get_load_detect_pipe(struct drm_connector *connector,
4522 struct intel_load_detect_pipe *old,
4523 struct drm_modeset_acquire_ctx *ctx)
4525 struct intel_encoder *encoder =
4526 intel_attached_encoder(to_intel_connector(connector));
4527 struct intel_crtc *possible_crtc;
4528 struct intel_crtc *crtc = NULL;
4529 struct drm_device *dev = encoder->base.dev;
4530 struct drm_i915_private *dev_priv = to_i915(dev);
4531 struct drm_mode_config *config = &dev->mode_config;
4532 struct drm_atomic_state *state = NULL, *restore_state = NULL;
4533 struct drm_connector_state *connector_state;
4534 struct intel_crtc_state *crtc_state;
4537 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4538 connector->base.id, connector->name,
4539 encoder->base.base.id, encoder->base.name);
4541 old->restore_state = NULL;
/* caller must already hold the connection mutex */
4543 drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
4546 * Algorithm gets a little messy:
4548 * - if the connector already has an assigned crtc, use it (but make
4549 * sure it's on first)
4551 * - try to find the first unused crtc that can drive this connector,
4552 * and use that if we find one
4555 /* See if we already have a CRTC for this connector */
4556 if (connector->state->crtc) {
4557 crtc = to_intel_crtc(connector->state->crtc);
4559 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4563 /* Make sure the crtc and connector are running */
4567 /* Find an unused one (if possible) */
4568 for_each_intel_crtc(dev, possible_crtc) {
/* skip crtcs this encoder cannot drive */
4569 if (!(encoder->base.possible_crtcs &
4570 drm_crtc_mask(&possible_crtc->base)))
4573 ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
/* an enabled crtc is in use; unlock it and keep looking */
4577 if (possible_crtc->base.state->enable) {
4578 drm_modeset_unlock(&possible_crtc->base.mutex);
4582 crtc = possible_crtc;
4587 * If we didn't find an unused CRTC, don't use any.
4590 drm_dbg_kms(&dev_priv->drm,
4591 "no pipe available for load-detect\n");
4597 state = drm_atomic_state_alloc(dev);
4598 restore_state = drm_atomic_state_alloc(dev);
4599 if (!state || !restore_state) {
4604 state->acquire_ctx = ctx;
4605 restore_state->acquire_ctx = ctx;
4607 connector_state = drm_atomic_get_connector_state(state, connector);
4608 if (IS_ERR(connector_state)) {
4609 ret = PTR_ERR(connector_state);
4613 ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
4617 crtc_state = intel_atomic_get_crtc_state(state, crtc);
4618 if (IS_ERR(crtc_state)) {
4619 ret = PTR_ERR(crtc_state);
4623 crtc_state->uapi.active = true;
/* program the fixed 640x480 probe mode on the chosen crtc */
4625 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
/* no planes during load detect - we only need the pipe/encoder */
4630 ret = intel_modeset_disable_planes(state, &crtc->base);
/* snapshot current connector/crtc/plane state for later restore */
4634 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
4636 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
4638 ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
4640 drm_dbg_kms(&dev_priv->drm,
4641 "Failed to create a copy of old state to restore: %i\n",
4646 ret = drm_atomic_commit(state);
4648 drm_dbg_kms(&dev_priv->drm,
4649 "failed to set mode on load-detect pipe\n");
4653 old->restore_state = restore_state;
4654 drm_atomic_state_put(state);
4656 /* let the connector get through one full cycle before testing */
4657 intel_crtc_wait_for_next_vblank(crtc);
/* error/unwind path: drop both states */
4663 drm_atomic_state_put(state);
4666 if (restore_state) {
4667 drm_atomic_state_put(restore_state);
4668 restore_state = NULL;
/* -EDEADLK means the caller must back off and retry the locking */
4671 if (ret == -EDEADLK)
/*
 * Undo intel_get_load_detect_pipe(): commit the saved restore_state so the
 * display returns to its pre-probe configuration, then drop the state
 * reference.  A NULL restore_state (probe never lit a pipe) is a no-op
 * (the early-return line is elided from this listing).
 */
4677 void intel_release_load_detect_pipe(struct drm_connector *connector,
4678 struct intel_load_detect_pipe *old,
4679 struct drm_modeset_acquire_ctx *ctx)
4681 struct intel_encoder *intel_encoder =
4682 intel_attached_encoder(to_intel_connector(connector));
4683 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4684 struct drm_encoder *encoder = &intel_encoder->base;
4685 struct drm_atomic_state *state = old->restore_state;
4688 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4689 connector->base.id, connector->name,
4690 encoder->base.id, encoder->name);
/* re-commit the duplicated pre-probe state */
4695 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4697 drm_dbg_kms(&i915->drm,
4698 "Couldn't release load detect pipe: %i\n", ret);
4699 drm_atomic_state_put(state);
/*
 * Return the DPLL reference clock frequency in kHz for the programmed
 * DPLL state: the VBT SSC frequency when spread-spectrum input is
 * selected, otherwise a platform-dependent constant (the constant
 * return lines are elided from this listing).
 */
4702 static int i9xx_pll_refclk(struct drm_device *dev,
4703 const struct intel_crtc_state *pipe_config)
4705 struct drm_i915_private *dev_priv = to_i915(dev);
4706 u32 dpll = pipe_config->dpll_hw_state.dpll;
4708 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4709 return dev_priv->vbt.lvds_ssc_freq;
4710 else if (HAS_PCH_SPLIT(dev_priv))
4712 else if (DISPLAY_VER(dev_priv) != 2)
4718 /* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Decode the i9xx-family DPLL/FP register state in pipe_config back into
 * m/n/p divider values and compute the resulting port clock (kHz),
 * storing it in pipe_config->port_clock.
 *
 * NOTE(review): several lines are elided from this listing (p2 constant
 * assignments, else branches, early return in the unknown-DPLL case).
 */
4719 void i9xx_crtc_clock_get(struct intel_crtc *crtc,
4720 struct intel_crtc_state *pipe_config)
4722 struct drm_device *dev = crtc->base.dev;
4723 struct drm_i915_private *dev_priv = to_i915(dev);
4724 u32 dpll = pipe_config->dpll_hw_state.dpll;
4728 int refclk = i9xx_pll_refclk(dev, pipe_config);
/* FP0 or FP1 is in use depending on the rate-select bit */
4730 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4731 fp = pipe_config->dpll_hw_state.fp0;
4733 fp = pipe_config->dpll_hw_state.fp1;
4735 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
/* Pineview encodes N/M2 differently from the rest of the family */
4736 if (IS_PINEVIEW(dev_priv)) {
4737 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
4738 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
4740 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
4741 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
/* gen3+ path: P1 is a one-hot field, P2 depends on the DPLL mode */
4744 if (DISPLAY_VER(dev_priv) != 2) {
4745 if (IS_PINEVIEW(dev_priv))
4746 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
4747 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
4749 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
4750 DPLL_FPA01_P1_POST_DIV_SHIFT);
4752 switch (dpll & DPLL_MODE_MASK) {
4753 case DPLLB_MODE_DAC_SERIAL:
4754 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
4757 case DPLLB_MODE_LVDS:
4758 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
4762 drm_dbg_kms(&dev_priv->drm,
4763 "Unknown DPLL mode %08x in programmed "
4764 "mode\n", (int)(dpll & DPLL_MODE_MASK));
4768 if (IS_PINEVIEW(dev_priv))
4769 port_clock = pnv_calc_dpll_params(refclk, &clock);
4771 port_clock = i9xx_calc_dpll_params(refclk, &clock);
/* gen2 path: LVDS on i85x has its own P1 encoding */
4773 enum pipe lvds_pipe;
4775 if (IS_I85X(dev_priv) &&
4776 intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
4777 lvds_pipe == crtc->pipe) {
4778 u32 lvds = intel_de_read(dev_priv, LVDS);
4780 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
4781 DPLL_FPA01_P1_POST_DIV_SHIFT);
4783 if (lvds & LVDS_CLKB_POWER_UP)
4788 if (dpll & PLL_P1_DIVIDE_BY_TWO)
4791 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
4792 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
4794 if (dpll & PLL_P2_DIVIDE_BY_4)
4800 port_clock = i9xx_calc_dpll_params(refclk, &clock);
4804 * This value includes pixel_multiplier. We will use
4805 * port_clock to compute adjusted_mode.crtc_clock in the
4806 * encoder's get_config() function.
4808 pipe_config->port_clock = port_clock;
4811 int intel_dotclock_calculate(int link_freq,
4812 const struct intel_link_m_n *m_n)
4815 * The calculation for the data clock is:
4816 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4817 * But we want to avoid losing precison if possible, so:
4818 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4820 * and the link clock is simpler:
4821 * link_clock = (m * link_clock) / n
4827 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
4830 /* Returns the currently programmed mode of the given encoder. */
/*
 * Allocates and returns a drm_display_mode describing what the encoder's
 * pipe is currently scanning out, by reading the hardware state back.
 * The caller owns (and must kfree) the returned mode.  Returns NULL when
 * the encoder is off or an allocation/readout step fails (the NULL-return
 * and kfree error-path lines are elided from this listing).
 */
4831 struct drm_display_mode *
4832 intel_encoder_current_mode(struct intel_encoder *encoder)
4834 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4835 struct intel_crtc_state *crtc_state;
4836 struct drm_display_mode *mode;
4837 struct intel_crtc *crtc;
/* encoder off -> no current mode */
4840 if (!encoder->get_hw_state(encoder, &pipe))
4843 crtc = intel_crtc_for_pipe(dev_priv, pipe);
4845 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4849 crtc_state = intel_crtc_state_alloc(crtc);
/* full hardware readout into the scratch crtc_state */
4855 if (!intel_crtc_get_pipe_config(crtc_state)) {
4861 intel_encoder_get_config(encoder, crtc_state);
4863 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4871 * intel_wm_need_update - Check whether watermarks need updating
4872 * @cur: current plane state
4873 * @new: new plane state
4875 * Check current plane state versus the new one to determine whether
4876 * watermarks need to be recalculated.
4878 * Returns true or false.
4880 static bool intel_wm_need_update(const struct intel_plane_state *cur,
4881 struct intel_plane_state *new)
4883 /* Update watermarks on tiling or size changes. */
4884 if (new->uapi.visible != cur->uapi.visible)
4887 if (!cur->hw.fb || !new->hw.fb)
4890 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
4891 cur->hw.rotation != new->hw.rotation ||
4892 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
4893 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
4894 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
4895 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
4901 static bool needs_scaling(const struct intel_plane_state *state)
4903 int src_w = drm_rect_width(&state->uapi.src) >> 16;
4904 int src_h = drm_rect_height(&state->uapi.src) >> 16;
4905 int dst_w = drm_rect_width(&state->uapi.dst);
4906 int dst_h = drm_rect_height(&state->uapi.dst);
4908 return (src_w != dst_w || src_h != dst_h);
/*
 * Derive per-crtc update flags (watermark pre/post updates, cxsr disable,
 * LP watermark disable, frontbuffer bits) from how this plane changes
 * between the old and new states.  Returns 0 or a negative errno from the
 * scaler setup.
 *
 * NOTE(review): intermediate lines (returns, braces) are elided from this
 * listing; comments annotate visible code only.
 */
4911 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
4912 struct intel_crtc_state *new_crtc_state,
4913 const struct intel_plane_state *old_plane_state,
4914 struct intel_plane_state *new_plane_state)
4916 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
4917 struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
4918 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4919 bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
4920 bool was_crtc_enabled = old_crtc_state->hw.active;
4921 bool is_crtc_enabled = new_crtc_state->hw.active;
4922 bool turn_off, turn_on, visible, was_visible;
/* gen9+ planes (except cursor) may need a pipe scaler */
4925 if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
4926 ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
4931 was_visible = old_plane_state->uapi.visible;
4932 visible = new_plane_state->uapi.visible;
/* a plane cannot have been visible on a disabled crtc */
4934 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
4935 was_visible = false;
4938 * Visibility is calculated as if the crtc was on, but
4939 * after scaler setup everything depends on it being off
4940 * when the crtc isn't active.
4942 * FIXME this is wrong for watermarks. Watermarks should also
4943 * be computed as if the pipe would be active. Perhaps move
4944 * per-plane wm computation to the .check_plane() hook, and
4945 * only combine the results from all planes in the current place?
4947 if (!is_crtc_enabled) {
4948 intel_plane_set_invisible(new_crtc_state, new_plane_state);
4952 if (!was_visible && !visible)
4955 turn_off = was_visible && (!visible || mode_changed);
4956 turn_on = visible && (!was_visible || mode_changed);
4958 drm_dbg_atomic(&dev_priv->drm,
4959 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
4960 crtc->base.base.id, crtc->base.name,
4961 plane->base.base.id, plane->base.name,
4962 was_visible, visible,
4963 turn_off, turn_on, mode_changed);
/* pre-gen5 (excluding g4x) needs a wm update before enabling a plane */
4966 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
4967 new_crtc_state->update_wm_pre = true;
4969 /* must disable cxsr around plane enable/disable */
4970 if (plane->id != PLANE_CURSOR)
4971 new_crtc_state->disable_cxsr = true;
4972 } else if (turn_off) {
4973 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
4974 new_crtc_state->update_wm_post = true;
4976 /* must disable cxsr around plane enable/disable */
4977 if (plane->id != PLANE_CURSOR)
4978 new_crtc_state->disable_cxsr = true;
4979 } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
4980 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
4981 /* FIXME bollocks */
4982 new_crtc_state->update_wm_pre = true;
4983 new_crtc_state->update_wm_post = true;
4987 if (visible || was_visible)
4988 new_crtc_state->fb_bits |= plane->frontbuffer_bit;
4991 * ILK/SNB DVSACNTR/Sprite Enable
4992 * IVB SPR_CTL/Sprite Enable
4993 * "When in Self Refresh Big FIFO mode, a write to enable the
4994 * plane will be internally buffered and delayed while Big FIFO
4997 * Which means that enabling the sprite can take an extra frame
4998 * when we start in big FIFO mode (LP1+). Thus we need to drop
4999 * down to LP0 and wait for vblank in order to make sure the
5000 * sprite gets enabled on the next vblank after the register write.
5001 * Doing otherwise would risk enabling the sprite one frame after
5002 * we've already signalled flip completion. We can resume LP1+
5003 * once the sprite has been enabled.
5006 * WaCxSRDisabledForSpriteScaling:ivb
5007 * IVB SPR_SCALE/Scaling Enable
5008 * "Low Power watermarks must be disabled for at least one
5009 * frame before enabling sprite scaling, and kept disabled
5010 * until sprite scaling is disabled."
5012 * ILK/SNB DVSASCALE/Scaling Enable
5013 * "When in Self Refresh Big FIFO mode, scaling enable will be
5014 * masked off while Big FIFO mode is exiting."
5016 * Despite the w/a only being listed for IVB we assume that
5017 * the ILK/SNB note has similar ramifications, hence we apply
5018 * the w/a on all three platforms.
5020 * With experimental results seems this is needed also for primary
5021 * plane, not only sprite plane.
5023 if (plane->id != PLANE_CURSOR &&
5024 (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
5025 IS_IVYBRIDGE(dev_priv)) &&
5026 (turn_on || (!needs_scaling(old_plane_state) &&
5027 needs_scaling(new_plane_state))))
5028 new_crtc_state->disable_lp_wm = true;
5033 static bool encoders_cloneable(const struct intel_encoder *a,
5034 const struct intel_encoder *b)
5036 /* masks could be asymmetric, so check both ways */
5037 return a == b || (a->cloneable & (1 << b->type) &&
5038 b->cloneable & (1 << a->type));
/*
 * Verify that @encoder can be cloned with every other encoder already
 * assigned to @crtc in @state.  Returns false as soon as an incompatible
 * encoder is found (the return lines and loop `continue` are elided from
 * this listing).
 */
5041 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
5042 struct intel_crtc *crtc,
5043 struct intel_encoder *encoder)
5045 struct intel_encoder *source_encoder;
5046 struct drm_connector *connector;
5047 struct drm_connector_state *connector_state;
5050 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
/* only connectors routed to this crtc matter */
5051 if (connector_state->crtc != &crtc->base)
5055 to_intel_encoder(connector_state->best_encoder);
5056 if (!encoders_cloneable(encoder, source_encoder))
/*
 * For every plane in @state that has a planar (NV12 Y/UV) linked partner,
 * pull that partner plane into the atomic state too, so both halves of the
 * pair are always programmed together.  Returns 0 or a negative errno.
 * (The loop's `continue` for unlinked planes and the final return are
 * elided from this listing.)
 */
5063 static int icl_add_linked_planes(struct intel_atomic_state *state)
5065 struct intel_plane *plane, *linked;
5066 struct intel_plane_state *plane_state, *linked_plane_state;
5069 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5070 linked = plane_state->planar_linked_plane;
5075 linked_plane_state = intel_atomic_get_plane_state(state, linked);
5076 if (IS_ERR(linked_plane_state))
5077 return PTR_ERR(linked_plane_state);
/* sanity: the link must be mutual, and exactly one side is the slave */
5079 drm_WARN_ON(state->base.dev,
5080 linked_plane_state->planar_linked_plane != plane);
5081 drm_WARN_ON(state->base.dev,
5082 linked_plane_state->planar_slave == plane_state->planar_slave);
/*
 * ICL+ planar (NV12) handling: tear down stale Y/UV plane links, then for
 * every plane that needs NV12 assign a free Y-capable plane as its slave,
 * copying the relevant hardware state across.  Returns 0 on success,
 * -EINVAL-style failure when no free Y plane is available, or a negative
 * errno from acquiring a plane state.
 *
 * NOTE(review): intermediate lines (continues, breaks, returns, braces)
 * are elided from this listing.
 */
5088 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
5090 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5091 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5092 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
5093 struct intel_plane *plane, *linked;
5094 struct intel_plane_state *plane_state;
/* plane linking only exists on gen11+ */
5097 if (DISPLAY_VER(dev_priv) < 11)
5101 * Destroy all old plane links and make the slave plane invisible
5102 * in the crtc_state->active_planes mask.
5104 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5105 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
5108 plane_state->planar_linked_plane = NULL;
5109 if (plane_state->planar_slave && !plane_state->uapi.visible) {
5110 crtc_state->enabled_planes &= ~BIT(plane->id);
5111 crtc_state->active_planes &= ~BIT(plane->id);
5112 crtc_state->update_planes |= BIT(plane->id);
5115 plane_state->planar_slave = false;
/* nothing more to do if no plane needs NV12 */
5118 if (!crtc_state->nv12_planes)
5121 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5122 struct intel_plane_state *linked_state = NULL;
5124 if (plane->pipe != crtc->pipe ||
5125 !(crtc_state->nv12_planes & BIT(plane->id)))
/* find an idle Y-capable plane on this crtc to act as the slave */
5128 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
5129 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
5132 if (crtc_state->active_planes & BIT(linked->id))
5135 linked_state = intel_atomic_get_plane_state(state, linked);
5136 if (IS_ERR(linked_state))
5137 return PTR_ERR(linked_state);
5142 if (!linked_state) {
5143 drm_dbg_kms(&dev_priv->drm,
5144 "Need %d free Y planes for planar YUV\n",
5145 hweight8(crtc_state->nv12_planes));
5150 plane_state->planar_linked_plane = linked;
5152 linked_state->planar_slave = true;
5153 linked_state->planar_linked_plane = plane;
5154 crtc_state->enabled_planes |= BIT(linked->id);
5155 crtc_state->active_planes |= BIT(linked->id);
5156 crtc_state->update_planes |= BIT(linked->id);
5157 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
5158 linked->base.name, plane->base.name);
5160 /* Copy parameters to slave plane */
5161 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
5162 linked_state->color_ctl = plane_state->color_ctl;
5163 linked_state->view = plane_state->view;
5164 linked_state->decrypt = plane_state->decrypt;
5166 intel_plane_copy_hw_state(linked_state, plane_state);
5167 linked_state->uapi.src = plane_state->uapi.src;
5168 linked_state->uapi.dst = plane_state->uapi.dst;
/* HDR planes must name their Y plane in PLANE_CUS_CTL */
5170 if (icl_is_hdr_plane(dev_priv, plane->id)) {
5171 if (linked->id == PLANE_SPRITE5)
5172 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
5173 else if (linked->id == PLANE_SPRITE4)
5174 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
5175 else if (linked->id == PLANE_SPRITE3)
5176 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
5177 else if (linked->id == PLANE_SPRITE2)
5178 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
5180 MISSING_CASE(linked->id);
5187 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
5189 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5190 struct intel_atomic_state *state =
5191 to_intel_atomic_state(new_crtc_state->uapi.state);
5192 const struct intel_crtc_state *old_crtc_state =
5193 intel_atomic_get_old_crtc_state(state, crtc);
5195 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
5198 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
5200 const struct drm_display_mode *pipe_mode =
5201 &crtc_state->hw.pipe_mode;
5204 if (!crtc_state->hw.enable)
5207 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5208 pipe_mode->crtc_clock);
5210 return min(linetime_wm, 0x1ff);
5213 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
5214 const struct intel_cdclk_state *cdclk_state)
5216 const struct drm_display_mode *pipe_mode =
5217 &crtc_state->hw.pipe_mode;
5220 if (!crtc_state->hw.enable)
5223 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5224 cdclk_state->logical.cdclk);
5226 return min(linetime_wm, 0x1ff);
/*
 * SKL+ variant of the line time watermark, computed against the crtc's
 * pixel rate with round-up division, then adjusted for Display WA #1135
 * on BXT/GLK with IPC enabled (the adjustment statement itself is elided
 * from this listing).  Clamped to the 9-bit field (0x1ff).
 */
5229 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
5231 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5232 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5233 const struct drm_display_mode *pipe_mode =
5234 &crtc_state->hw.pipe_mode;
/* disabled crtc contributes no linetime */
5237 if (!crtc_state->hw.enable)
5240 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
5241 crtc_state->pixel_rate);
5243 /* Display WA #1135: BXT:ALL GLK:ALL */
5244 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
5245 dev_priv->ipc_enabled)
5248 return min(linetime_wm, 0x1ff);
/*
 * Fill crtc_state->linetime (SKL or HSW formula depending on display
 * version) and, when the crtc supports IPS, crtc_state->ips_linetime
 * based on the atomic cdclk state.  Returns 0 or PTR_ERR(cdclk_state)
 * (the return statements themselves are elided from this listing).
 */
5251 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
5252 struct intel_crtc *crtc)
5254 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5255 struct intel_crtc_state *crtc_state =
5256 intel_atomic_get_new_crtc_state(state, crtc);
5257 const struct intel_cdclk_state *cdclk_state;
5259 if (DISPLAY_VER(dev_priv) >= 9)
5260 crtc_state->linetime = skl_linetime_wm(crtc_state);
5262 crtc_state->linetime = hsw_linetime_wm(crtc_state);
/* ips_linetime is only meaningful on IPS-capable crtcs */
5264 if (!hsw_crtc_supports_ips(crtc))
5267 cdclk_state = intel_atomic_get_cdclk_state(state);
5268 if (IS_ERR(cdclk_state))
5269 return PTR_ERR(cdclk_state);
5271 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
/*
 * Per-crtc atomic check: clock computation, color management, watermark
 * calculation (target + intermediate), scaler setup, IPS config, linetime
 * watermarks and PSR2 selective fetch.  Returns 0 or the first failing
 * sub-check's errno (the error-return lines are elided from this listing).
 */
5277 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
5278 struct intel_crtc *crtc)
5280 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5281 struct intel_crtc_state *crtc_state =
5282 intel_atomic_get_new_crtc_state(state, crtc);
5283 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
/* pre-gen5 (excluding g4x) wants a post-disable wm update */
5286 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
5287 mode_changed && !crtc_state->hw.active)
5288 crtc_state->update_wm_post = true;
/* compute the clock unless a shared DPLL is (wrongly) already set */
5290 if (mode_changed && crtc_state->hw.enable &&
5291 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
5292 ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
5298 * May need to update pipe gamma enable bits
5299 * when C8 planes are getting enabled/disabled.
5301 if (c8_planes_changed(crtc_state))
5302 crtc_state->uapi.color_mgmt_changed = true;
5304 if (mode_changed || crtc_state->update_pipe ||
5305 crtc_state->uapi.color_mgmt_changed) {
5306 ret = intel_color_check(crtc_state);
5311 ret = intel_compute_pipe_wm(state, crtc);
5313 drm_dbg_kms(&dev_priv->drm,
5314 "Target pipe watermarks are invalid\n");
5319 * Calculate 'intermediate' watermarks that satisfy both the
5320 * old state and the new state. We can program these
5323 ret = intel_compute_intermediate_wm(state, crtc);
5325 drm_dbg_kms(&dev_priv->drm,
5326 "No valid intermediate pipe watermarks are possible\n");
/* gen9+ scaler bookkeeping */
5330 if (DISPLAY_VER(dev_priv) >= 9) {
5331 if (mode_changed || crtc_state->update_pipe) {
5332 ret = skl_update_scaler_crtc(crtc_state);
5337 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
5342 if (HAS_IPS(dev_priv)) {
5343 ret = hsw_compute_ips_config(crtc_state);
5348 if (DISPLAY_VER(dev_priv) >= 9 ||
5349 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
5350 ret = hsw_compute_linetime_wm(state, crtc);
5356 ret = intel_psr2_sel_fetch_update(state, crtc);
/*
 * After hardware state readout, make each connector's atomic state agree
 * with reality: point it at the encoder/crtc the hardware is actually
 * using (taking a connector reference), or clear it when the encoder is
 * off (after dropping the stale reference).
 *
 * NOTE(review): the encoder-active condition and some braces are elided
 * from this listing; comments annotate visible code only.
 */
5363 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
5365 struct intel_connector *connector;
5366 struct drm_connector_list_iter conn_iter;
5368 drm_connector_list_iter_begin(dev, &conn_iter);
5369 for_each_intel_connector_iter(connector, &conn_iter) {
5370 struct drm_connector_state *conn_state = connector->base.state;
5371 struct intel_encoder *encoder =
5372 to_intel_encoder(connector->base.encoder);
/* drop the reference held for the previous crtc binding */
5374 if (conn_state->crtc)
5375 drm_connector_put(&connector->base);
5378 struct intel_crtc *crtc =
5379 to_intel_crtc(encoder->base.crtc);
5380 const struct intel_crtc_state *crtc_state =
5381 to_intel_crtc_state(crtc->base.state);
5383 conn_state->best_encoder = &encoder->base;
5384 conn_state->crtc = &crtc->base;
/* derive max_bpc from pipe bpp, defaulting to 8bpc (24bpp) */
5385 conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
5387 drm_connector_get(&connector->base);
5389 conn_state->best_encoder = NULL;
5390 conn_state->crtc = NULL;
5393 drm_connector_list_iter_end(&conn_iter);
/*
 * Clamp pipe_config->pipe_bpp to what this connector's sink can accept,
 * mapping conn_state->max_bpc to a pipe bpp (the switch-case arms with
 * the concrete bpp values are elided from this listing).  Logs when the
 * clamp actually lowers the bpp.
 */
5397 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
5398 struct intel_crtc_state *pipe_config)
5400 struct drm_connector *connector = conn_state->connector;
5401 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5402 const struct drm_display_info *info = &connector->display_info;
5405 switch (conn_state->max_bpc) {
5419 MISSING_CASE(conn_state->max_bpc);
/* only ever lower the pipe bpp, never raise it */
5423 if (bpp < pipe_config->pipe_bpp) {
5424 drm_dbg_kms(&i915->drm,
5425 "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
5426 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
5427 connector->base.id, connector->name,
5429 3 * conn_state->max_requested_bpc,
5430 pipe_config->pipe_bpp);
5432 pipe_config->pipe_bpp = bpp;
/*
 * Pick the platform's maximum pipe bpp as a starting point (the concrete
 * per-platform bpp constants are elided from this listing), then clamp it
 * down per-connector via compute_sink_pipe_bpp() for every connector
 * bound to this crtc in the atomic state.
 */
5439 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5440 struct intel_crtc_state *pipe_config)
5442 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5443 struct drm_atomic_state *state = pipe_config->uapi.state;
5444 struct drm_connector *connector;
5445 struct drm_connector_state *connector_state;
/* platform ladder: g4x/vlv/chv, then gen5+, then everything else */
5448 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5449 IS_CHERRYVIEW(dev_priv)))
5451 else if (DISPLAY_VER(dev_priv) >= 5)
5456 pipe_config->pipe_bpp = bpp;
5458 /* Clamp display bpp to connector max bpp */
5459 for_each_new_connector_in_state(state, connector, connector_state, i) {
/* skip connectors routed to other crtcs */
5462 if (connector_state->crtc != &crtc->base)
5465 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
/*
 * Debug-log the crtc_* timing fields of @mode (clock, h/v timings, type,
 * flags).  Purely diagnostic; no state is modified.  (One argument line
 * of the drm_dbg_kms call is elided from this listing.)
 */
5473 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
5474 const struct drm_display_mode *mode)
5476 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
5477 "type: 0x%x flags: 0x%x\n",
5479 mode->crtc_hdisplay, mode->crtc_hsync_start,
5480 mode->crtc_hsync_end, mode->crtc_htotal,
5481 mode->crtc_vdisplay, mode->crtc_vsync_start,
5482 mode->crtc_vsync_end, mode->crtc_vtotal,
5483 mode->type, mode->flags);
/*
 * Debug-log one link M/N configuration (@id names it, e.g. "dp m_n" or
 * "fdi") together with its lane count.  Purely diagnostic.  (Some format
 * arguments are elided from this listing.)
 */
5487 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
5488 const char *id, unsigned int lane_count,
5489 const struct intel_link_m_n *m_n)
5491 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5493 drm_dbg_kms(&i915->drm,
5494 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
5496 m_n->gmch_m, m_n->gmch_n,
5497 m_n->link_m, m_n->link_n, m_n->tu);
5501 intel_dump_infoframe(struct drm_i915_private *dev_priv,
5502 const union hdmi_infoframe *frame)
5504 if (!drm_debug_enabled(DRM_UT_KMS))
5507 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
5511 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
5512 const struct drm_dp_vsc_sdp *vsc)
5514 if (!drm_debug_enabled(DRM_UT_KMS))
5517 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
5520 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
/*
 * Human-readable names for INTEL_OUTPUT_* values, indexed by the enum via
 * the OUTPUT_TYPE() designated-initializer helper.  (Several table entries
 * between ANALOG and DP_MST are elided from this listing.)
 */
5522 static const char * const output_type_str[] = {
5523 OUTPUT_TYPE(UNUSED),
5524 OUTPUT_TYPE(ANALOG),
5534 OUTPUT_TYPE(DP_MST),
/*
 * Render an output_types bitmask into @buf as a comma-separated list of
 * names from output_type_str[], clearing each handled bit as it goes;
 * warns once if unknown bits remain.  (Local declarations, the buffer
 * advance after each snprintf, and overflow handling are elided from
 * this listing.)
 */
5539 static void snprintf_output_types(char *buf, size_t len,
5540 unsigned int output_types)
5547 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
/* skip types not present in the mask */
5550 if ((output_types & BIT(i)) == 0)
5553 r = snprintf(str, len, "%s%s",
5554 str != buf ? "," : "", output_type_str[i]);
5560 output_types &= ~BIT(i);
/* any bits left over have no name in output_type_str[] */
5563 WARN_ON_ONCE(output_types != 0);
/* Human-readable names for the INTEL_OUTPUT_FORMAT_* pixel formats. */
5566 static const char * const output_format_str[] = {
5567 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
5568 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
5569 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
/*
 * Map an output format enum to its name, with out-of-range values
 * redirected to a fallback (the if-body performing the redirect is
 * elided from this listing).
 */
5572 static const char *output_formats(enum intel_output_format format)
5574 if (format >= ARRAY_SIZE(output_format_str))
5576 return output_format_str[format];
/*
 * Debug-log one plane's state: fb id/size/format/modifier and visibility
 * (or "[NOFB]" when no fb is bound), rotation, scaler id, and - when
 * visible - the src/dst rectangles.  Purely diagnostic.  (The early
 * return after the NOFB branch is elided from this listing.)
 */
5579 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
5581 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5582 struct drm_i915_private *i915 = to_i915(plane->base.dev);
5583 const struct drm_framebuffer *fb = plane_state->hw.fb;
/* no fb bound: log the short form */
5586 drm_dbg_kms(&i915->drm,
5587 "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
5588 plane->base.base.id, plane->base.name,
5589 yesno(plane_state->uapi.visible));
5593 drm_dbg_kms(&i915->drm,
5594 "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
5595 plane->base.base.id, plane->base.name,
5596 fb->base.id, fb->width, fb->height, &fb->format->format,
5597 fb->modifier, yesno(plane_state->uapi.visible));
5598 drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
5599 plane_state->hw.rotation, plane_state->scaler_id);
/* src is 16.16 fixed point, dst is integer pixels */
5600 if (plane_state->uapi.visible)
5601 drm_dbg_kms(&i915->drm,
5602 "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
5603 DRM_RECT_FP_ARG(&plane_state->uapi.src),
5604 DRM_RECT_ARG(&plane_state->uapi.dst));
/*
 * Dump the full contents of a crtc state to the KMS debug log.
 * @context describes why the dump is happening (e.g. which phase of
 * the atomic commit).  Bails out early (branch elided in this
 * extract) when the crtc is not enabled.
 */
5607 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
5608 struct intel_atomic_state *state,
5609 const char *context)
5611 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5612 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5613 const struct intel_plane_state *plane_state;
5614 struct intel_plane *plane;
5618 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
5619 crtc->base.base.id, crtc->base.name,
5620 yesno(pipe_config->hw.enable), context);
/* nothing more to dump for a disabled crtc */
5622 if (!pipe_config->hw.enable)
/* output types / format */
5625 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
5626 drm_dbg_kms(&dev_priv->drm,
5627 "active: %s, output_types: %s (0x%x), output format: %s\n",
5628 yesno(pipe_config->hw.active),
5629 buf, pipe_config->output_types,
5630 output_formats(pipe_config->output_format));
5632 drm_dbg_kms(&dev_priv->drm,
5633 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
5634 transcoder_name(pipe_config->cpu_transcoder),
5635 pipe_config->pipe_bpp, pipe_config->dither);
5637 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
5638 transcoder_name(pipe_config->mst_master_transcoder))
5640 drm_dbg_kms(&dev_priv->drm,
5641 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
5642 transcoder_name(pipe_config->master_transcoder),
5643 pipe_config->sync_mode_slaves_mask);
5645 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
5646 pipe_config->bigjoiner_slave ? "slave" :
5647 pipe_config->bigjoiner ? "master" : "no");
5649 drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
5650 enableddisabled(pipe_config->splitter.enable),
5651 pipe_config->splitter.link_count,
5652 pipe_config->splitter.pixel_overlap);
/* link M/N values: FDI for PCH encoders, DP M/N (+ DRRS M2/N2) for DP */
5654 if (pipe_config->has_pch_encoder)
5655 intel_dump_m_n_config(pipe_config, "fdi",
5656 pipe_config->fdi_lanes,
5657 &pipe_config->fdi_m_n);
5659 if (intel_crtc_has_dp_encoder(pipe_config)) {
5660 intel_dump_m_n_config(pipe_config, "dp m_n",
5661 pipe_config->lane_count, &pipe_config->dp_m_n);
5662 if (pipe_config->has_drrs)
5663 intel_dump_m_n_config(pipe_config, "dp m2_n2",
5664 pipe_config->lane_count,
5665 &pipe_config->dp_m2_n2);
/* audio + HDMI/DP infoframes; only enabled frames get dumped */
5668 drm_dbg_kms(&dev_priv->drm,
5669 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
5670 pipe_config->has_audio, pipe_config->has_infoframe,
5671 pipe_config->infoframes.enable);
5673 if (pipe_config->infoframes.enable &
5674 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
5675 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
5676 pipe_config->infoframes.gcp);
5677 if (pipe_config->infoframes.enable &
5678 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
5679 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
5680 if (pipe_config->infoframes.enable &
5681 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
5682 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
5683 if (pipe_config->infoframes.enable &
5684 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
5685 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
5686 if (pipe_config->infoframes.enable &
5687 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
5688 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
/* GMP (gamut metadata packet) shares the .drm storage, so dumping
 * infoframes.drm again here is intentional, not a copy/paste slip */
5689 if (pipe_config->infoframes.enable &
5690 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
5691 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
5692 if (pipe_config->infoframes.enable &
5693 intel_hdmi_infoframe_enable(DP_SDP_VSC))
5694 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
5696 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
5697 yesno(pipe_config->vrr.enable),
5698 pipe_config->vrr.vmin, pipe_config->vrr.vmax,
5699 pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
5700 pipe_config->vrr.flipline,
5701 intel_vrr_vmin_vblank_start(pipe_config),
5702 intel_vrr_vmax_vblank_start(pipe_config));
/* modes and timings: requested, adjusted, and pipe variants */
5704 drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
5705 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
5706 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
5707 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
5708 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
5709 drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
5710 drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
5711 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
5712 drm_dbg_kms(&dev_priv->drm,
5713 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
5714 pipe_config->port_clock,
5715 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
5716 pipe_config->pixel_rate);
5718 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
5719 pipe_config->linetime, pipe_config->ips_linetime);
/* scaler state only exists on display version 9+ */
5721 if (DISPLAY_VER(dev_priv) >= 9)
5722 drm_dbg_kms(&dev_priv->drm,
5723 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
5725 pipe_config->scaler_state.scaler_users,
5726 pipe_config->scaler_state.scaler_id);
/* panel fitter: GMCH-style on old platforms, PCH-style otherwise */
5728 if (HAS_GMCH(dev_priv))
5729 drm_dbg_kms(&dev_priv->drm,
5730 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
5731 pipe_config->gmch_pfit.control,
5732 pipe_config->gmch_pfit.pgm_ratios,
5733 pipe_config->gmch_pfit.lvds_border_bits);
5735 drm_dbg_kms(&dev_priv->drm,
5736 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
5737 DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
5738 enableddisabled(pipe_config->pch_pfit.enabled),
5739 yesno(pipe_config->pch_pfit.force_thru));
5741 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
5742 pipe_config->ips_enabled, pipe_config->double_wide);
5744 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
/* color management: CHV has cgm_mode, others use csc_mode */
5746 if (IS_CHERRYVIEW(dev_priv))
5747 drm_dbg_kms(&dev_priv->drm,
5748 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5749 pipe_config->cgm_mode, pipe_config->gamma_mode,
5750 pipe_config->gamma_enable, pipe_config->csc_enable);
5752 drm_dbg_kms(&dev_priv->drm,
5753 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5754 pipe_config->csc_mode, pipe_config->gamma_mode,
5755 pipe_config->gamma_enable, pipe_config->csc_enable);
5757 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
5758 pipe_config->hw.degamma_lut ?
5759 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
5760 pipe_config->hw.gamma_lut ?
5761 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
/* finally dump the state of every plane on this crtc's pipe */
5767 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5768 if (plane->pipe == crtc->pipe)
5769 intel_dump_plane_state(plane_state);
/*
 * Verify no two encoders in the atomic state claim the same digital
 * port, and that MST and SST/HDMI are not mixed on one port.
 * Returns false on conflict (return statements elided in extract).
 */
5773 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
5775 struct drm_device *dev = state->base.dev;
5776 struct drm_connector *connector;
5777 struct drm_connector_list_iter conn_iter;
5778 unsigned int used_ports = 0;
5779 unsigned int used_mst_ports = 0;
5783 * We're going to peek into connector->state,
5784 * hence connection_mutex must be held.
5786 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5789 * Walk the connector list instead of the encoder
5790 * list to detect the problem on ddi platforms
5791 * where there's just one encoder per digital port.
5793 drm_connector_list_iter_begin(dev, &conn_iter);
5794 drm_for_each_connector_iter(connector, &conn_iter) {
5795 struct drm_connector_state *connector_state;
5796 struct intel_encoder *encoder;
/* prefer the new state from this commit, fall back to current */
5799 drm_atomic_get_new_connector_state(&state->base,
5801 if (!connector_state)
5802 connector_state = connector->state;
5804 if (!connector_state->best_encoder)
5807 encoder = to_intel_encoder(connector_state->best_encoder);
5809 drm_WARN_ON(dev, !connector_state->crtc);
5811 switch (encoder->type) {
5812 case INTEL_OUTPUT_DDI:
/* DDI encoder type must only appear on HAS_DDI platforms */
5813 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5816 case INTEL_OUTPUT_DP:
5817 case INTEL_OUTPUT_HDMI:
5818 case INTEL_OUTPUT_EDP:
5819 /* the same port mustn't appear more than once */
5820 if (used_ports & BIT(encoder->port))
5823 used_ports |= BIT(encoder->port);
5825 case INTEL_OUTPUT_DP_MST:
5833 drm_connector_list_iter_end(&conn_iter);
5835 /* can't mix MST and SST/HDMI on the same port */
5836 if (used_ports & used_mst_ports)
/*
 * Sync the parts of uapi state that can change without a full modeset
 * (the color management blobs) into hw state, sourcing them from the
 * bigjoiner master crtc when applicable.
 */
5843 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
5844 struct intel_crtc_state *crtc_state)
5846 const struct intel_crtc_state *master_crtc_state;
5847 struct intel_crtc *master_crtc;
5849 master_crtc = intel_master_crtc(crtc_state);
5850 master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
5852 /* No need to copy state if the master state is unchanged */
5853 if (master_crtc_state)
5854 intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
/*
 * Full uapi -> hw state copy, done when a modeset recomputes the crtc
 * state: mirrors enable/active, both modes and the scaling filter,
 * then delegates the nomodeset (color blob) part.
 */
5858 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
5859 struct intel_crtc_state *crtc_state)
5861 crtc_state->hw.enable = crtc_state->uapi.enable;
5862 crtc_state->hw.active = crtc_state->uapi.active;
5863 crtc_state->hw.mode = crtc_state->uapi.mode;
5864 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
5865 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5867 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
/*
 * Reverse copy: propagate hw state back into the uapi state so that
 * userspace-visible state matches what the hardware is doing.
 * Bigjoiner slaves are skipped (early return elided in extract) —
 * their uapi state is owned by the master.
 */
5870 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
5872 if (crtc_state->bigjoiner_slave)
5875 crtc_state->uapi.enable = crtc_state->hw.enable;
5876 crtc_state->uapi.active = crtc_state->hw.active;
/* drm_atomic_set_mode_for_crtc() can only fail on allocation; warn */
5877 drm_WARN_ON(crtc_state->uapi.crtc->dev,
5878 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
5880 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
5881 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
5883 /* copy color blobs to uapi */
5884 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
5885 crtc_state->hw.degamma_lut);
5886 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
5887 crtc_state->hw.gamma_lut);
5888 drm_property_replace_blob(&crtc_state->uapi.ctm,
5889 crtc_state->hw.ctm);
/*
 * Initialize a bigjoiner slave's crtc state as a copy of the master's
 * (@from_crtc_state), while preserving the slave's own uapi, scaler,
 * dpll and crc fields.  The hw sub-struct is re-initialized from the
 * master rather than duplicated wholesale so blob refcounts stay sane.
 */
5893 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
5894 const struct intel_crtc_state *from_crtc_state)
5896 struct intel_crtc_state *saved_state;
5898 saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
/* keep the slave's own identity/resources, not the master's */
5902 saved_state->uapi = crtc_state->uapi;
5903 saved_state->scaler_state = crtc_state->scaler_state;
5904 saved_state->shared_dpll = crtc_state->shared_dpll;
5905 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5906 saved_state->crc_enabled = crtc_state->crc_enabled;
5908 intel_crtc_free_hw_state(crtc_state);
5909 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5912 /* Re-init hw state */
5913 memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
5914 crtc_state->hw.enable = from_crtc_state->hw.enable;
5915 crtc_state->hw.active = from_crtc_state->hw.active;
5916 crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
5917 crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
/* mirror the master's change flags and link the slave back to it */
5920 crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
5921 crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
5922 crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
5923 crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
5924 crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
5925 crtc_state->bigjoiner_slave = true;
5926 crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
5927 crtc_state->has_audio = from_crtc_state->has_audio;
/*
 * Reset @crtc_state to a freshly-allocated (mostly zeroed) state before
 * recomputing it, preserving only the fields listed below that must
 * survive across the recompute (uapi, scaler, dpll, crc, and on
 * G4X/VLV/CHV the watermarks).
 */
5933 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
5934 struct intel_crtc_state *crtc_state)
5936 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5937 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5938 struct intel_crtc_state *saved_state;
5940 saved_state = intel_crtc_state_alloc(crtc);
5944 /* free the old crtc_state->hw members */
5945 intel_crtc_free_hw_state(crtc_state);
5947 /* FIXME: before the switch to atomic started, a new pipe_config was
5948 * kzalloc'd. Code that depends on any field being zero should be
5949 * fixed, so that the crtc_state can be safely duplicated. For now,
5950 * only fields that are know to not cause problems are preserved. */
5952 saved_state->uapi = crtc_state->uapi;
5953 saved_state->scaler_state = crtc_state->scaler_state;
5954 saved_state->shared_dpll = crtc_state->shared_dpll;
5955 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5956 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
5957 sizeof(saved_state->icl_port_dplls));
5958 saved_state->crc_enabled = crtc_state->crc_enabled;
/* these platforms keep sw watermark state in the crtc state */
5959 if (IS_G4X(dev_priv) ||
5960 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5961 saved_state->wm = crtc_state->wm;
5963 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5966 intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
/*
 * Compute the full pipe configuration for a modeset: sanitize sync
 * flags, compute the baseline bpp, collect output types, then let each
 * encoder's .compute_config() hook adjust the state, retrying (via a
 * label elided from this extract) when the crtc reports -EAGAIN.
 * Returns 0, -EDEADLK for lock retries, or a negative error.
 */
5972 intel_modeset_pipe_config(struct intel_atomic_state *state,
5973 struct intel_crtc_state *pipe_config)
5975 struct drm_crtc *crtc = pipe_config->uapi.crtc;
5976 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5977 struct drm_connector *connector;
5978 struct drm_connector_state *connector_state;
5979 int base_bpp, ret, i;
/* default the transcoder to the one matching the pipe */
5982 pipe_config->cpu_transcoder =
5983 (enum transcoder) to_intel_crtc(crtc)->pipe;
5986 * Sanitize sync polarity flags based on requested ones. If neither
5987 * positive or negative polarity is requested, treat this as meaning
5988 * negative polarity.
5990 if (!(pipe_config->hw.adjusted_mode.flags &
5991 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5992 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5994 if (!(pipe_config->hw.adjusted_mode.flags &
5995 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5996 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5998 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
6003 base_bpp = pipe_config->pipe_bpp;
6006 * Determine the real pipe dimensions. Note that stereo modes can
6007 * increase the actual pipe size due to the frame doubling and
6008 * insertion of additional space for blanks between the frame. This
6009 * is stored in the crtc timings. We use the requested mode to do this
6010 * computation to clearly distinguish it from the adjusted mode, which
6011 * can be changed by the connectors in the below retry loop.
6013 drm_mode_get_hv_timing(&pipe_config->hw.mode,
6014 &pipe_config->pipe_src_w,
6015 &pipe_config->pipe_src_h);
6017 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6018 struct intel_encoder *encoder =
6019 to_intel_encoder(connector_state->best_encoder);
/* only encoders driving this crtc participate */
6021 if (connector_state->crtc != crtc)
6024 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
6025 drm_dbg_kms(&i915->drm,
6026 "rejecting invalid cloning configuration\n");
6031 * Determine output_types before calling the .compute_config()
6032 * hooks so that the hooks can use this information safely.
6034 if (encoder->compute_output_type)
6035 pipe_config->output_types |=
6036 BIT(encoder->compute_output_type(encoder, pipe_config,
6039 pipe_config->output_types |= BIT(encoder->type);
6043 /* Ensure the port clock defaults are reset when retrying. */
6044 pipe_config->port_clock = 0;
6045 pipe_config->pixel_multiplier = 1;
6047 /* Fill in default crtc timings, allow encoders to overwrite them. */
6048 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
6049 CRTC_STEREO_DOUBLE);
6051 /* Pass our mode to the connectors and the CRTC to give them a chance to
6052 * adjust it according to limitations or connector properties, and also
6053 * a chance to reject the mode entirely.
6055 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6056 struct intel_encoder *encoder =
6057 to_intel_encoder(connector_state->best_encoder);
6059 if (connector_state->crtc != crtc)
6062 ret = encoder->compute_config(encoder, pipe_config,
/* -EDEADLK must be propagated untouched for lock backoff */
6064 if (ret == -EDEADLK)
6067 drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
6072 /* Set default port clock if not overwritten by the encoder. Needs to be
6073 * done afterwards in case the encoder adjusts the mode. */
6074 if (!pipe_config->port_clock)
6075 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
6076 * pipe_config->pixel_multiplier;
6078 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
6079 if (ret == -EDEADLK)
/* -EAGAIN means bandwidth constrained: retry exactly once */
6081 if (ret == -EAGAIN) {
6082 if (drm_WARN(&i915->drm, !retry,
6083 "loop in pipe configuration computation\n"))
6086 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
6091 drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
6095 /* Dithering seems to not pass-through bits correctly when it should, so
6096 * only enable it on 6bpc panels and when its not a compliance
6097 * test requesting 6bpc video pattern.
6099 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
6100 !pipe_config->dither_force_disable;
6101 drm_dbg_kms(&i915->drm,
6102 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
6103 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/*
 * Run each encoder's optional .compute_config_late() hook for the
 * connectors driving this crtc, after the main config computation.
 * Propagates the hook's error (return handling elided in extract).
 */
6109 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
6111 struct intel_atomic_state *state =
6112 to_intel_atomic_state(crtc_state->uapi.state);
6113 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6114 struct drm_connector_state *conn_state;
6115 struct drm_connector *connector;
6118 for_each_new_connector_in_state(&state->base, connector,
6120 struct intel_encoder *encoder =
6121 to_intel_encoder(conn_state->best_encoder);
/* skip connectors on other crtcs and encoders without the hook */
6124 if (conn_state->crtc != &crtc->base ||
6125 !encoder->compute_config_late)
6128 ret = encoder->compute_config_late(encoder, crtc_state,
/*
 * Compare two clocks, accepting them as equal if they are within
 * roughly 5% of each other (the final inequality below is an
 * integer-arithmetic form of diff/avg < 5%).  A zero clock only
 * matches another zero clock.  Return statements are elided in this
 * extract.
 */
6137 bool intel_fuzzy_clock_check(int clock1, int clock2)
6141 if (clock1 == clock2)
6144 if (!clock1 || !clock2)
6147 diff = abs(clock1 - clock2);
6149 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
/*
 * Compare two M/N value pairs.  Exact equality always matches; in
 * non-exact mode, zero components never match, and otherwise the
 * pairs are rescaled to a common denominator (rescaling code elided
 * in this extract) and compared fuzzily via intel_fuzzy_clock_check().
 */
6156 intel_compare_m_n(unsigned int m, unsigned int n,
6157 unsigned int m2, unsigned int n2,
6160 if (m == m2 && n == n2)
6163 if (exact || !m || !n || !m2 || !n2)
/* rescaling below multiplies into an int; guard against overflow */
6166 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
6173 } else if (n < n2) {
6183 return intel_fuzzy_clock_check(m, m2);
/*
 * Compare two link M/N structs: TU must match exactly; the gmch and
 * link M/N pairs are compared via intel_compare_m_n() (fuzzily unless
 * @exact).
 */
6187 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
6188 const struct intel_link_m_n *m2_n2,
6191 return m_n->tu == m2_n2->tu &&
6192 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
6193 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
6194 intel_compare_m_n(m_n->link_m, m_n->link_n,
6195 m2_n2->link_m, m2_n2->link_n, exact);
/* Byte-wise equality of two HDMI infoframes. */
6199 intel_compare_infoframe(const union hdmi_infoframe *a,
6200 const union hdmi_infoframe *b)
6202 return memcmp(a, b, sizeof(*a)) == 0;
/* Byte-wise equality of two DP VSC SDPs. */
6206 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
6207 const struct drm_dp_vsc_sdp *b)
6209 return memcmp(a, b, sizeof(*a)) == 0;
/*
 * Log an infoframe mismatch between expected (@a) and found (@b)
 * state.  During a fastset check this is only debug-level (and
 * skipped entirely when KMS debugging is off); otherwise it is an
 * error.  The if/else around the two halves is elided in this extract.
 */
6213 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
6214 bool fastset, const char *name,
6215 const union hdmi_infoframe *a,
6216 const union hdmi_infoframe *b)
/* fastset path: quiet unless KMS debugging is enabled */
6219 if (!drm_debug_enabled(DRM_UT_KMS))
6222 drm_dbg_kms(&dev_priv->drm,
6223 "fastset mismatch in %s infoframe\n", name);
6224 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6225 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
6226 drm_dbg_kms(&dev_priv->drm, "found:\n");
6227 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
/* non-fastset path: a real state checker failure, log as error */
6229 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
6230 drm_err(&dev_priv->drm, "expected:\n");
6231 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
6232 drm_err(&dev_priv->drm, "found:\n");
6233 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * Log a DP VSC SDP mismatch between expected (@a) and found (@b)
 * state; same debug-vs-error split as pipe_config_infoframe_mismatch()
 * (the if/else around the two halves is elided in this extract).
 */
6238 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
6239 bool fastset, const char *name,
6240 const struct drm_dp_vsc_sdp *a,
6241 const struct drm_dp_vsc_sdp *b)
/* fastset path: quiet unless KMS debugging is enabled */
6244 if (!drm_debug_enabled(DRM_UT_KMS))
6247 drm_dbg_kms(&dev_priv->drm,
6248 "fastset mismatch in %s dp sdp\n", name);
6249 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6250 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
6251 drm_dbg_kms(&dev_priv->drm, "found:\n");
6252 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
/* non-fastset path: a real state checker failure, log as error */
6254 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
6255 drm_err(&dev_priv->drm, "expected:\n");
6256 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
6257 drm_err(&dev_priv->drm, "found:\n");
6258 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * printf-style helper used by the PIPE_CONF_CHECK_* macros to report a
 * state mismatch on @crtc for field @name: debug level during fastset
 * checks, error level during full state verification.  The __printf
 * attribute lets the compiler type-check the format arguments.
 */
6262 static void __printf(4, 5)
6263 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
6264 const char *name, const char *format, ...)
6266 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6267 struct va_format vaf;
6270 va_start(args, format);
/* %pV expands the caller's format + va_list in one go */
6275 drm_dbg_kms(&i915->drm,
6276 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
6277 crtc->base.base.id, crtc->base.name, name, &vaf);
6279 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
6280 crtc->base.base.id, crtc->base.name, name, &vaf);
/*
 * Decide whether fastboot (reusing the firmware-programmed mode
 * without a full modeset) is enabled: an explicit i915.fastboot
 * module parameter wins; otherwise it defaults on for display v9+
 * and VLV/CHV, off elsewhere (return values elided in this extract).
 */
6285 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
/* -1 means "not set by the user"; any other value is authoritative */
6287 if (dev_priv->params.fastboot != -1)
6288 return dev_priv->params.fastboot;
6290 /* Enable fastboot by default on Skylake and newer */
6291 if (DISPLAY_VER(dev_priv) >= 9)
6294 /* Enable fastboot by default on VLV and CHV */
6295 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6298 /* Disabled by default on all others */
6303 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
6304 const struct intel_crtc_state *pipe_config,
6307 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
6308 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
6311 bool fixup_inherited = fastset &&
6312 current_config->inherited && !pipe_config->inherited;
6314 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
6315 drm_dbg_kms(&dev_priv->drm,
6316 "initial modeset and fastboot not set\n");
6320 #define PIPE_CONF_CHECK_X(name) do { \
6321 if (current_config->name != pipe_config->name) { \
6322 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6323 "(expected 0x%08x, found 0x%08x)", \
6324 current_config->name, \
6325 pipe_config->name); \
6330 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
6331 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
6332 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6333 "(expected 0x%08x, found 0x%08x)", \
6334 current_config->name & (mask), \
6335 pipe_config->name & (mask)); \
6340 #define PIPE_CONF_CHECK_I(name) do { \
6341 if (current_config->name != pipe_config->name) { \
6342 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6343 "(expected %i, found %i)", \
6344 current_config->name, \
6345 pipe_config->name); \
6350 #define PIPE_CONF_CHECK_BOOL(name) do { \
6351 if (current_config->name != pipe_config->name) { \
6352 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6353 "(expected %s, found %s)", \
6354 yesno(current_config->name), \
6355 yesno(pipe_config->name)); \
6361 * Checks state where we only read out the enabling, but not the entire
6362 * state itself (like full infoframes or ELD for audio). These states
6363 * require a full modeset on bootup to fix up.
6365 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6366 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6367 PIPE_CONF_CHECK_BOOL(name); \
6369 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6370 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6371 yesno(current_config->name), \
6372 yesno(pipe_config->name)); \
6377 #define PIPE_CONF_CHECK_P(name) do { \
6378 if (current_config->name != pipe_config->name) { \
6379 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6380 "(expected %p, found %p)", \
6381 current_config->name, \
6382 pipe_config->name); \
/*
 * Compare a link M/N field between current and new crtc state, logging
 * all component values on mismatch.
 * FIX(review): the source contained "¤t_config" — mojibake for
 * "&current_config" (the "&curren" prefix was eaten as the HTML
 * entity &curren; = "¤"); restored to "&current_config" below.
 */
6387 #define PIPE_CONF_CHECK_M_N(name) do { \
6388 if (!intel_compare_link_m_n(&current_config->name, \
6389 &pipe_config->name,\
6391 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6392 "(expected tu %i gmch %i/%i link %i/%i, " \
6393 "found tu %i, gmch %i/%i link %i/%i)", \
6394 current_config->name.tu, \
6395 current_config->name.gmch_m, \
6396 current_config->name.gmch_n, \
6397 current_config->name.link_m, \
6398 current_config->name.link_n, \
6399 pipe_config->name.tu, \
6400 pipe_config->name.gmch_m, \
6401 pipe_config->name.gmch_n, \
6402 pipe_config->name.link_m, \
6403 pipe_config->name.link_n); \
6408 /* This is required for BDW+ where there is only one set of registers for
6409 * switching between high and low RR.
6410 * This macro can be used whenever a comparison has to be made between one
6411 * hw state and multiple sw state variables.
/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 * FIX(review): "¤t_config" was mojibake for "&current_config"
 * (HTML entity &curren; swallowed the "&curren" prefix); restored.
 */
6413 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6414 if (!intel_compare_link_m_n(&current_config->name, \
6415 &pipe_config->name, !fastset) && \
6416 !intel_compare_link_m_n(&current_config->alt_name, \
6417 &pipe_config->name, !fastset)) { \
6418 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6419 "(expected tu %i gmch %i/%i link %i/%i, " \
6420 "or tu %i gmch %i/%i link %i/%i, " \
6421 "found tu %i, gmch %i/%i link %i/%i)", \
6422 current_config->name.tu, \
6423 current_config->name.gmch_m, \
6424 current_config->name.gmch_n, \
6425 current_config->name.link_m, \
6426 current_config->name.link_n, \
6427 current_config->alt_name.tu, \
6428 current_config->alt_name.gmch_m, \
6429 current_config->alt_name.gmch_n, \
6430 current_config->alt_name.link_m, \
6431 current_config->alt_name.link_n, \
6432 pipe_config->name.tu, \
6433 pipe_config->name.gmch_m, \
6434 pipe_config->name.gmch_n, \
6435 pipe_config->name.link_m, \
6436 pipe_config->name.link_n); \
6441 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6442 if ((current_config->name ^ pipe_config->name) & (mask)) { \
6443 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6444 "(%x) (expected %i, found %i)", \
6446 current_config->name & (mask), \
6447 pipe_config->name & (mask)); \
6452 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6453 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6454 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6455 "(expected %i, found %i)", \
6456 current_config->name, \
6457 pipe_config->name); \
/*
 * Compare one HDMI infoframe between current and new state, logging
 * both frames on mismatch.
 * FIX(review): "¤t_config" was mojibake for "&current_config"
 * (HTML entity &curren; swallowed the "&curren" prefix); restored.
 */
6462 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6463 if (!intel_compare_infoframe(&current_config->infoframes.name, \
6464 &pipe_config->infoframes.name)) { \
6465 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6466 &current_config->infoframes.name, \
6467 &pipe_config->infoframes.name); \
/*
 * Compare the DP VSC SDP between current and new state; skipped when
 * either side has PSR enabled, since PSR rewrites the VSC SDP.
 * FIX(review): "¤t_config" was mojibake for "&current_config"
 * (HTML entity &curren; swallowed the "&curren" prefix); restored.
 */
6472 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
6473 if (!current_config->has_psr && !pipe_config->has_psr && \
6474 !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
6475 &pipe_config->infoframes.name)) { \
6476 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
6477 &current_config->infoframes.name, \
6478 &pipe_config->infoframes.name); \
6483 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
6484 if (current_config->name1 != pipe_config->name1) { \
6485 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
6486 "(expected %i, found %i, won't compare lut values)", \
6487 current_config->name1, \
6488 pipe_config->name1); \
6491 if (!intel_color_lut_equal(current_config->name2, \
6492 pipe_config->name2, pipe_config->name1, \
6494 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
6495 "hw_state doesn't match sw_state"); \
6501 #define PIPE_CONF_QUIRK(quirk) \
6502 ((current_config->quirks | pipe_config->quirks) & (quirk))
6504 PIPE_CONF_CHECK_I(cpu_transcoder);
6506 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6507 PIPE_CONF_CHECK_I(fdi_lanes);
6508 PIPE_CONF_CHECK_M_N(fdi_m_n);
6510 PIPE_CONF_CHECK_I(lane_count);
6511 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
6513 if (DISPLAY_VER(dev_priv) < 8) {
6514 PIPE_CONF_CHECK_M_N(dp_m_n);
6516 if (current_config->has_drrs)
6517 PIPE_CONF_CHECK_M_N(dp_m2_n2);
6519 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
6521 PIPE_CONF_CHECK_X(output_types);
6523 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
6524 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
6525 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
6526 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
6527 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
6528 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
6530 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
6531 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
6532 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
6533 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
6534 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
6535 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
6537 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
6538 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
6539 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
6540 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
6541 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
6542 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
6544 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
6545 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
6546 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
6547 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
6548 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
6549 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
6551 PIPE_CONF_CHECK_I(pixel_multiplier);
6553 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6554 DRM_MODE_FLAG_INTERLACE);
6556 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6557 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6558 DRM_MODE_FLAG_PHSYNC);
6559 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6560 DRM_MODE_FLAG_NHSYNC);
6561 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6562 DRM_MODE_FLAG_PVSYNC);
6563 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6564 DRM_MODE_FLAG_NVSYNC);
6567 PIPE_CONF_CHECK_I(output_format);
6568 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
6569 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
6570 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6571 PIPE_CONF_CHECK_BOOL(limited_color_range);
6573 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
6574 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
6575 PIPE_CONF_CHECK_BOOL(has_infoframe);
6576 PIPE_CONF_CHECK_BOOL(fec_enable);
6578 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
6580 PIPE_CONF_CHECK_X(gmch_pfit.control);
6581 /* pfit ratios are autocomputed by the hw on gen4+ */
6582 if (DISPLAY_VER(dev_priv) < 4)
6583 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
6584 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
6587 * Changing the EDP transcoder input mux
6588 * (A_ONOFF vs. A_ON) requires a full modeset.
6590 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
6593 PIPE_CONF_CHECK_I(pipe_src_w);
6594 PIPE_CONF_CHECK_I(pipe_src_h);
6596 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
6597 if (current_config->pch_pfit.enabled) {
6598 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
6599 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
6600 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
6601 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
6604 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
6605 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
6607 PIPE_CONF_CHECK_X(gamma_mode);
6608 if (IS_CHERRYVIEW(dev_priv))
6609 PIPE_CONF_CHECK_X(cgm_mode);
6611 PIPE_CONF_CHECK_X(csc_mode);
6612 PIPE_CONF_CHECK_BOOL(gamma_enable);
6613 PIPE_CONF_CHECK_BOOL(csc_enable);
6615 PIPE_CONF_CHECK_I(linetime);
6616 PIPE_CONF_CHECK_I(ips_linetime);
6618 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
6620 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
6622 if (current_config->active_planes) {
6623 PIPE_CONF_CHECK_BOOL(has_psr);
6624 PIPE_CONF_CHECK_BOOL(has_psr2);
6625 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
6626 PIPE_CONF_CHECK_I(dc3co_exitline);
6630 PIPE_CONF_CHECK_BOOL(double_wide);
6632 if (dev_priv->dpll.mgr) {
6633 PIPE_CONF_CHECK_P(shared_dpll);
6635 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
6636 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
6637 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
6638 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
6639 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6640 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
6641 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
6642 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
6643 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
6644 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
6645 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
6646 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
6647 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
6648 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
6649 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
6650 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
6651 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
6652 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
6653 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
6654 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
6655 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
6656 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
6657 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
6658 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
6659 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
6660 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
6661 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
6662 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
6663 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
6664 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
6665 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
6668 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
6669 PIPE_CONF_CHECK_X(dsi_pll.div);
6671 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
6672 PIPE_CONF_CHECK_I(pipe_bpp);
6674 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
6675 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
6676 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
6678 PIPE_CONF_CHECK_I(min_voltage_level);
6680 if (current_config->has_psr || pipe_config->has_psr)
6681 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
6682 ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
6684 PIPE_CONF_CHECK_X(infoframes.enable);
6686 PIPE_CONF_CHECK_X(infoframes.gcp);
6687 PIPE_CONF_CHECK_INFOFRAME(avi);
6688 PIPE_CONF_CHECK_INFOFRAME(spd);
6689 PIPE_CONF_CHECK_INFOFRAME(hdmi);
6690 PIPE_CONF_CHECK_INFOFRAME(drm);
6691 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
6693 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
6694 PIPE_CONF_CHECK_I(master_transcoder);
6695 PIPE_CONF_CHECK_BOOL(bigjoiner);
6696 PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
6697 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
6699 PIPE_CONF_CHECK_I(dsc.compression_enable);
6700 PIPE_CONF_CHECK_I(dsc.dsc_split);
6701 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
6703 PIPE_CONF_CHECK_BOOL(splitter.enable);
6704 PIPE_CONF_CHECK_I(splitter.link_count);
6705 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
6707 PIPE_CONF_CHECK_I(mst_master_transcoder);
6709 PIPE_CONF_CHECK_BOOL(vrr.enable);
6710 PIPE_CONF_CHECK_I(vrr.vmin);
6711 PIPE_CONF_CHECK_I(vrr.vmax);
6712 PIPE_CONF_CHECK_I(vrr.flipline);
6713 PIPE_CONF_CHECK_I(vrr.pipeline_full);
6714 PIPE_CONF_CHECK_I(vrr.guardband);
6716 #undef PIPE_CONF_CHECK_X
6717 #undef PIPE_CONF_CHECK_I
6718 #undef PIPE_CONF_CHECK_BOOL
6719 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
6720 #undef PIPE_CONF_CHECK_P
6721 #undef PIPE_CONF_CHECK_FLAGS
6722 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
6723 #undef PIPE_CONF_CHECK_COLOR_LUT
6724 #undef PIPE_CONF_QUIRK
/*
 * Sanity-check a read-out pipe config: for PCH-encoder (FDI) pipes, the
 * dotclock implied by the FDI link M/N values must fuzzily match the
 * dotclock the encoder reported in the adjusted mode.
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		drm_WARN(&dev_priv->drm,
			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
			 fdi_dotclock, dotclock);
	}
}
/*
 * Read back the SKL+ watermark and DDB hardware state for @crtc and compare
 * it against the computed software state in @new_crtc_state, logging a
 * drm_err() for every mismatch. No-op on pre-gen9 or inactive pipes.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* heap-allocated scratch for the HW readout (too big for the stack) */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	/* SKL-style watermarks only exist on display version 9+ */
	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Per-level watermarks */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* Transition watermark */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV WM0, only where the HW tracks it separately */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV transition watermark */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB allocation */
		hw_ddb_entry = &hw->ddb_y[plane->id];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
/*
 * Verify every connector in @state that is (or, for @crtc == NULL, is not)
 * attached to @crtc: its state must be consistent with the new crtc state,
 * and its atomic best_encoder must match the legacy encoder pointer.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
/*
 * Cross-check each encoder's software enable state against the connectors
 * in @state and against the encoder's reported hardware state.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			/* a connector drives this encoder in the new state */
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* encoder untouched by this commit */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
/*
 * Re-read the hardware pipe config into @old_crtc_state (which is recycled
 * as scratch space here) and compare it against the committed software
 * state in @new_crtc_state, warning on any divergence.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master_crtc;

	/* reset the old state so it can hold the HW readout */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* for bigjoiner the encoders hang off the master crtc */
	master_crtc = intel_master_crtc(new_crtc_state);

	for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
/*
 * Assert that every plane in @state is either a planar slave or is
 * visible in its uapi state (i.e. no plane was left in a bogus state).
 */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}
/*
 * Verify the software tracking of one shared DPLL against its hardware
 * state. With @crtc == NULL only the global reference accounting is
 * checked; otherwise the pipe's membership in the active/enabled masks
 * and the cached hw state are verified too.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* always-on PLLs have no meaningful on/off tracking to check */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* cached sw copy of the hw state must match what we just read */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
/*
 * Verify the shared DPLL bookkeeping for @crtc: check the new PLL (if any),
 * and if the crtc switched PLLs make sure it has been fully dropped from
 * the old PLL's active/enabled masks.
 */
static void
verify_shared_dpll_state(struct intel_crtc *crtc,
			 struct intel_crtc_state *old_crtc_state,
			 struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
/*
 * Compare the software MPLLB state of an active crtc against the values
 * read back from hardware, reporting each mismatching register field.
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
		   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_mpllb_state mpllb_hw_state = { 0 };
	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_encoder *encoder;

	/* NOTE(review): only relevant on platforms with MPLLB — confirm guard */
	if (!IS_DG2(i915))
		return;

	if (!new_crtc_state->hw.active)
		return;

	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* report one sw-vs-hw field mismatch */
#define MPLLB_CHECK(name) do { \
	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     mpllb_sw_state->name, \
				     mpllb_hw_state.name); \
	} \
} while (0)

	MPLLB_CHECK(mpllb_cp);
	MPLLB_CHECK(mpllb_div);
	MPLLB_CHECK(mpllb_div2);
	MPLLB_CHECK(mpllb_fracn1);
	MPLLB_CHECK(mpllb_fracn2);
	MPLLB_CHECK(mpllb_sscen);
	MPLLB_CHECK(mpllb_sscstep);

	/*
	 * ref_control is handled by the hardware/firmware and never
	 * programmed by the software, but the proper values are supplied
	 * in the bspec for verification purposes.
	 */
	MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
/*
 * Run the full post-commit state verification for one crtc. Skipped
 * unless the commit did a modeset or a fastset (update_pipe) on it.
 */
static void
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
		return;

	verify_wm_state(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
	verify_mpllb_state(state, new_crtc_state);
}
/*
 * Verify the reference accounting of every shared DPLL without tying the
 * check to any particular crtc (crtc/new_crtc_state passed as NULL).
 */
static void
verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv,
					 &dev_priv->dpll.shared_dplls[i],
					 NULL, NULL);
}
/* Verify the global (non-crtc-specific) state after a disabling commit. */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
/*
 * Pull every crtc into @state and force a full modeset (plus affected
 * connectors and planes) on each one that is active and not already
 * flagged for a modeset. Returns 0 or a negative errno.
 */
int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.active ||
		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		crtc_state->update_planes |= crtc_state->active_planes;
	}

	return 0;
}
/*
 * Recompute the vblank timestamping constants and the scanline counter
 * offset for a crtc that just became (or stays) active, taking VRR
 * adjusted timings into account.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	if (crtc_state->vrr.enable) {
		/* with VRR the effective vtotal/vblank follow vmin/vmax */
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (DISPLAY_VER(dev_priv) == 2) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
/*
 * Release the shared DPLL references of every crtc undergoing a modeset,
 * so they can be (re)assigned during compute. No-op on platforms without
 * a shared-DPLL manager.
 */
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (!dev_priv->dpll_funcs)
		return;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		intel_release_shared_dplls(state, crtc);
	}
}
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/* record which pipe the waiter should watch */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
/*
 * Update @active_pipes with the pipes present in @state: set the bit for
 * each crtc that will be active, clear it for each that won't. Pipes not
 * in the state keep their previous bit value. Returns the updated mask.
 */
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}
/*
 * Global checks run once a commit is known to contain a modeset. On
 * Haswell this also applies the enable-planes-after-pipe workaround.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	state->modeset = true;

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}
/*
 * Downgrade a requested modeset to a fastset (update_pipe) when the old
 * and new states compare equal under the fuzzy/fastset comparison.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;
}
/*
 * For a fastset, carry over the link parameters the hardware is already
 * running with instead of the freshly computed ones.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
/*
 * Pull each plane of @crtc whose id bit is set in @plane_ids_mask into
 * @state. Returns 0 or a negative errno from the plane-state lookup.
 */
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
					  struct intel_crtc *crtc,
					  u8 plane_ids_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if ((plane_ids_mask & BIT(plane->id)) == 0)
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
/*
 * Add to @state every plane that is enabled in either the old or the new
 * state of @crtc, so plane updates accompany the crtc change.
 */
int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	return intel_crtc_add_planes_to_state(state, crtc,
					      old_crtc_state->enabled_planes |
					      new_crtc_state->enabled_planes);
}
7456 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7458 /* See {hsw,vlv,ivb}_plane_ratio() */
7459 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7460 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7461 IS_IVYBRIDGE(dev_priv);
/*
 * Mirror the planes already in @state for @crtc onto the linked bigjoiner
 * crtc @other by adding the matching plane ids there.
 */
static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
					   struct intel_crtc *crtc,
					   struct intel_crtc *other)
{
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	u8 plane_ids = 0;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			plane_ids |= BIT(plane->id);
	}

	return intel_crtc_add_planes_to_state(state, other, plane_ids);
}
/*
 * For every bigjoiner crtc in @state, also add its planes to the linked
 * crtc so both halves of the joined pipe are updated together.
 */
static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
						      crtc_state->bigjoiner_linked_crtc);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Run the per-plane atomic checks for @state: link NV12/bigjoiner slave
 * planes, run each plane's check, and on platforms where the active plane
 * count influences min cdclk, pull extra planes into the state first.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* the cursor plane does not count towards the ratio */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Run the per-crtc atomic checks for every crtc in @state, logging and
 * returning the first failure.
 */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}
/*
 * Return true if any enabled crtc in @state whose cpu transcoder bit is
 * set in @transcoders needs a full modeset.
 */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}
/*
 * Validate the bigjoiner master/slave pairing for @crtc: if it needs
 * bigjoiner, claim the secondary crtc and copy the state over; error out
 * (-EINVAL) if the secondary does not exist or is already in use.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave_crtc, *master_crtc;

	/* slave being enabled, is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave_crtc = crtc;
		master_crtc = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
	if (!slave_crtc) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
	master_crtc = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave_crtc->base.base.id, slave_crtc->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave_crtc->base.base.id, slave_crtc->base.name,
		      master_crtc->base.base.id, master_crtc->base.name);
	return -EINVAL;
}
/*
 * Tear down a bigjoiner link: clear the bigjoiner flags and link pointers
 * on both the master state and its slave, and re-derive the slave's hw
 * state from its own uapi state.
 */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc_state *master_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);

	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
}
7662 * DOC: asynchronous flip implementation
7664 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7665 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7666 * Correspondingly, support is currently added for primary plane only.
7668 * Async flip can only change the plane surface address, so anything else
7669 * changing is rejected from the intel_atomic_check_async() function.
7670 * Once this check is cleared, flip done interrupt is enabled using
7671 * the intel_crtc_enable_flip_done() function.
7673 * As soon as the surface address register is written, flip done interrupt is
7674 generated and the requested events are sent to the userspace in the interrupt
7675 * handler itself. The timestamp and sequence sent during the flip done event
7676 * correspond to the last vblank and have no relation to the actual time when
7677 * the flip done event was sent.
7679 static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
7681 struct drm_i915_private *i915 = to_i915(state->base.dev);
7682 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7683 const struct intel_plane_state *new_plane_state, *old_plane_state;
7684 struct intel_plane *plane;
7687 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
7688 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
/* An async flip may not change anything that requires a full modeset. */
7690 if (intel_crtc_needs_modeset(new_crtc_state)) {
7691 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
7695 if (!new_crtc_state->hw.active) {
7696 drm_dbg_kms(&i915->drm, "CRTC inactive\n");
7699 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
7700 drm_dbg_kms(&i915->drm,
7701 "Active planes cannot be changed during async flip\n");
/*
 * Per-plane checks: everything but the surface address must stay
 * identical between the old and new plane state.
 */
7705 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7706 new_plane_state, i) {
7707 if (plane->pipe != crtc->pipe)
/*
 * TODO: Async flip is only supported through the page flip IOCTL
 * as of now. So support is currently added for the primary plane only.
 * Support for other planes on platforms that support it (vlv/chv and
 * icl+) should be added when async flip is enabled in the atomic
 * IOCTL path.
 */
7717 if (!plane->async_flip)
/*
 * FIXME: This check is kept generic for all platforms.
 * Need to verify this for all gen9 platforms to enable
 * this selectively if required.
 */
7725 switch (new_plane_state->hw.fb->modifier) {
7726 case I915_FORMAT_MOD_X_TILED:
7727 case I915_FORMAT_MOD_Y_TILED:
7728 case I915_FORMAT_MOD_Yf_TILED:
7731 drm_dbg_kms(&i915->drm,
7732 "Linear memory/CCS does not support async flips\n");
7736 if (new_plane_state->hw.fb->format->num_planes > 1) {
7737 drm_dbg_kms(&i915->drm,
7738 "Planar formats not supported with async flips\n");
7742 if (old_plane_state->view.color_plane[0].mapping_stride !=
7743 new_plane_state->view.color_plane[0].mapping_stride) {
7744 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
7748 if (old_plane_state->hw.fb->modifier !=
7749 new_plane_state->hw.fb->modifier) {
7750 drm_dbg_kms(&i915->drm,
7751 "Framebuffer modifiers cannot be changed in async flip\n");
7755 if (old_plane_state->hw.fb->format !=
7756 new_plane_state->hw.fb->format) {
7757 drm_dbg_kms(&i915->drm,
7758 "Framebuffer format cannot be changed in async flip\n");
7762 if (old_plane_state->hw.rotation !=
7763 new_plane_state->hw.rotation) {
7764 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
7768 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
7769 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
7770 drm_dbg_kms(&i915->drm,
7771 "Plane size/co-ordinates cannot be changed in async flip\n")
7775 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
7776 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
7780 if (old_plane_state->hw.pixel_blend_mode !=
7781 new_plane_state->hw.pixel_blend_mode) {
7782 drm_dbg_kms(&i915->drm,
7783 "Pixel blend mode cannot be changed in async flip\n");
7787 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
7788 drm_dbg_kms(&i915->drm,
7789 "Color encoding cannot be changed in async flip\n");
7793 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
7794 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
/* plane decryption is allowed to change only in synchronous flips */
7799 if (old_plane_state->decrypt != new_plane_state->decrypt)
/*
 * Pull the bigjoiner-linked CRTC of every bigjoiner CRTC into the atomic
 * state, propagate mode_changed to it when its partner needs a modeset,
 * and tear down stale bigjoiner master/slave links before recomputation.
 */
7806 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
7808 struct intel_crtc_state *crtc_state;
7809 struct intel_crtc *crtc;
7812 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7813 struct intel_crtc_state *linked_crtc_state;
7814 struct intel_crtc *linked_crtc;
7817 if (!crtc_state->bigjoiner)
/* Add the linked CRTC (and its connectors/planes) to the state. */
7820 linked_crtc = crtc_state->bigjoiner_linked_crtc;
7821 linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
7822 if (IS_ERR(linked_crtc_state))
7823 return PTR_ERR(linked_crtc_state);
7825 if (!intel_crtc_needs_modeset(crtc_state))
/* A modeset on one half of a bigjoiner pair forces one on the other. */
7828 linked_crtc_state->uapi.mode_changed = true;
7830 ret = drm_atomic_add_affected_connectors(&state->base,
7831 &linked_crtc->base);
7835 ret = intel_atomic_add_affected_planes(state, linked_crtc);
7840 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
/* Kill old bigjoiner link, we may re-establish afterwards */
7842 if (intel_crtc_needs_modeset(crtc_state) &&
7843 crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
7844 kill_bigjoiner_slave(state, crtc_state);
7851 * intel_atomic_check - validate state object
 * @dev: drm device
7853 * @_state: state to validate
7855 static int intel_atomic_check(struct drm_device *dev,
7856 struct drm_atomic_state *_state)
7858 struct drm_i915_private *dev_priv = to_i915(dev);
7859 struct intel_atomic_state *state = to_intel_atomic_state(_state);
7860 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7861 struct intel_crtc *crtc;
7863 bool any_ms = false;
/* A change of the "inherited" (BIOS takeover) flag forces a modeset. */
7865 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7866 new_crtc_state, i) {
7867 if (new_crtc_state->inherited != old_crtc_state->inherited)
7868 new_crtc_state->uapi.mode_changed = true;
7871 intel_vrr_check_modeset(state);
/* Core DRM modeset checks first, then i915-specific validation. */
7873 ret = drm_atomic_helper_check_modeset(dev, &state->base);
7877 ret = intel_bigjoiner_add_affected_crtcs(state);
/* Compute the new hw crtc state for every CRTC in the state. */
7881 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7882 new_crtc_state, i) {
7883 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7885 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
7890 if (!new_crtc_state->uapi.enable) {
7891 if (!new_crtc_state->bigjoiner_slave) {
7892 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
7898 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
7902 ret = intel_modeset_pipe_config(state, new_crtc_state);
7906 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
/* Late pipe config fixups, then see if a fastset can avoid the modeset. */
7912 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7913 new_crtc_state, i) {
7914 if (!intel_crtc_needs_modeset(new_crtc_state))
7917 ret = intel_modeset_pipe_config_late(new_crtc_state);
7921 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
/*
 * Check if fastset is allowed by external dependencies like other
 * pipes and transcoders.
 *
 * Right now it only forces a fullmodeset when the MST master
 * transcoder did not change but the pipe of the master transcoder
 * needs a fullmodeset so all slaves also need to do a fullmodeset;
 * or, in case of port synced crtcs, if one of the synced crtcs
 * needs a full modeset, all other synced crtcs should be
 * forced to do a full modeset.
 */
7935 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7936 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7939 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7940 enum transcoder master = new_crtc_state->mst_master_transcoder;
7942 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7943 new_crtc_state->uapi.mode_changed = true;
7944 new_crtc_state->update_pipe = false;
7948 if (is_trans_port_sync_mode(new_crtc_state)) {
7949 u8 trans = new_crtc_state->sync_mode_slaves_mask;
7951 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7952 trans |= BIT(new_crtc_state->master_transcoder);
7954 if (intel_cpu_transcoders_need_modeset(state, trans)) {
7955 new_crtc_state->uapi.mode_changed = true;
7956 new_crtc_state->update_pipe = false;
7960 if (new_crtc_state->bigjoiner) {
7961 struct intel_crtc_state *linked_crtc_state =
7962 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
7964 if (intel_crtc_needs_modeset(linked_crtc_state)) {
7965 new_crtc_state->uapi.mode_changed = true;
7966 new_crtc_state->update_pipe = false;
/* For fastsets, carry over the state the hardware keeps across the flip. */
7971 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7972 new_crtc_state, i) {
7973 if (intel_crtc_needs_modeset(new_crtc_state)) {
7978 if (!new_crtc_state->update_pipe)
7981 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
7984 if (any_ms && !check_digital_port_conflicts(state)) {
7985 drm_dbg_kms(&dev_priv->drm,
7986 "rejecting conflicting digital port configuration\n");
/* Global checks: MST bandwidth, planes, watermarks, memory BW, cdclk. */
7991 ret = drm_dp_mst_atomic_check(&state->base);
7995 ret = intel_atomic_check_planes(state);
7999 ret = intel_compute_global_watermarks(state);
8003 ret = intel_bw_atomic_check(state);
8007 ret = intel_cdclk_atomic_check(state, &any_ms);
8011 if (intel_any_crtc_needs_modeset(state))
8015 ret = intel_modeset_checks(state);
8019 ret = intel_modeset_calc_cdclk(state);
8023 intel_modeset_clear_plls(state);
8026 ret = intel_atomic_check_crtcs(state);
8030 ret = intel_fbc_atomic_check(state);
/* Validate async flips and dump the final config for debugging. */
8034 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8035 new_crtc_state, i) {
8036 if (new_crtc_state->uapi.async_flip) {
8037 ret = intel_atomic_check_async(state, crtc);
8042 if (!intel_crtc_needs_modeset(new_crtc_state) &&
8043 !new_crtc_state->update_pipe)
8046 intel_dump_pipe_config(new_crtc_state, state,
8047 intel_crtc_needs_modeset(new_crtc_state) ?
8048 "[modeset]" : "[fastset]");
/* -EDEADLK means the locking helpers will retry; don't log a failure. */
8054 if (ret == -EDEADLK)
/*
 * FIXME would probably be nice to know which crtc specifically
 * caused the failure, in cases where we can pinpoint it.
 */
8061 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8063 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
/*
 * Prepare planes for commit and pre-build DSB (display state buffer)
 * batches for every CRTC whose color management or pipe state will be
 * written during the commit.
 */
8068 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
8070 struct intel_crtc_state *crtc_state;
8071 struct intel_crtc *crtc;
8074 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
8078 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8079 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8081 if (mode_changed || crtc_state->update_pipe ||
8082 crtc_state->uapi.color_mgmt_changed) {
8083 intel_dsb_prepare(crtc_state);
/*
 * Enable CPU (and, for PCH encoders, PCH transcoder) FIFO underrun
 * reporting for @crtc. On DISPLAY_VER 2 this is skipped while no planes
 * are active, since those platforms report underruns with all planes off.
 */
8090 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
8091 struct intel_crtc_state *crtc_state)
8093 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8095 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
8096 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8098 if (crtc_state->has_pch_encoder) {
8099 enum pipe pch_transcoder =
8100 intel_crtc_pch_transcoder(crtc);
8102 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
/*
 * Apply pipe-level state that must be refreshed on a fastset: pipe source
 * size, panel fitter, linetime watermark and pipe chicken bits.
 */
8106 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
8107 const struct intel_crtc_state *new_crtc_state)
8109 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
8110 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
 * Update pipe size and adjust fitter if needed: the reason for this is
 * that in compute_mode_changes we check the native mode (not the pfit
 * mode) to see if we can flip rather than do a full mode set. In the
 * fastboot case, we'll flip, but if we don't update the pipesrc and
 * pfit state, we'll end up with a big fb scanned out into the wrong
 * sized surface.
 */
8120 intel_set_pipe_src_size(new_crtc_state);
/* on skylake this is done by detaching scalers */
8123 if (DISPLAY_VER(dev_priv) >= 9) {
8124 if (new_crtc_state->pch_pfit.enabled)
8125 skl_pfit_enable(new_crtc_state);
8126 } else if (HAS_PCH_SPLIT(dev_priv)) {
8127 if (new_crtc_state->pch_pfit.enabled)
8128 ilk_pfit_enable(new_crtc_state);
8129 else if (old_crtc_state->pch_pfit.enabled)
8130 ilk_pfit_disable(old_crtc_state);
/*
 * The register is supposedly single buffered so perhaps
 * not 100% correct to do this here. But SKL+ calculate
 * this based on the adjusted pixel rate so pfit changes do
 * affect it and so it must be updated for fastsets.
 * HSW/BDW only really need this here for fastboot, after
 * that the value should not change without a full modeset.
 */
8141 if (DISPLAY_VER(dev_priv) >= 9 ||
8142 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8143 hsw_set_linetime_wm(new_crtc_state);
8145 if (DISPLAY_VER(dev_priv) >= 11)
8146 icl_set_pipe_chicken(new_crtc_state);
/*
 * Program pipe-level state that must land before the plane registers are
 * armed: color management, pipe misc, fastset pipe state, PSR2 manual
 * tracking and watermarks.
 */
8149 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
8150 struct intel_crtc *crtc)
8152 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8153 const struct intel_crtc_state *old_crtc_state =
8154 intel_atomic_get_old_crtc_state(state, crtc);
8155 const struct intel_crtc_state *new_crtc_state =
8156 intel_atomic_get_new_crtc_state(state, crtc);
8157 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/*
 * During modesets pipe configuration was programmed as the
 * CRTC was enabled.
 */
8164 if (new_crtc_state->uapi.color_mgmt_changed ||
8165 new_crtc_state->update_pipe)
8166 intel_color_commit(new_crtc_state);
8168 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
8169 bdw_set_pipemisc(new_crtc_state);
8171 if (new_crtc_state->update_pipe)
8172 intel_pipe_fastset(old_crtc_state, new_crtc_state);
8175 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
8177 intel_atomic_update_watermarks(state, crtc);
/* Pipe-level programming that must happen after the planes are armed. */
8180 static void commit_pipe_post_planes(struct intel_atomic_state *state,
8181 struct intel_crtc *crtc)
8183 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8184 const struct intel_crtc_state *new_crtc_state =
8185 intel_atomic_get_new_crtc_state(state, crtc);
/*
 * Disable the scaler(s) after the plane(s) so that we don't
 * get a catastrophic underrun even if the two operations
 * end up happening in two different frames.
 */
8192 if (DISPLAY_VER(dev_priv) >= 9 &&
8193 !intel_crtc_needs_modeset(new_crtc_state))
8194 skl_detach_scalers(new_crtc_state);
/*
 * Enable @crtc when the new state requires a modeset; no-op otherwise.
 * Re-enables pipe CRC once vblanks are working again.
 */
8197 static void intel_enable_crtc(struct intel_atomic_state *state,
8198 struct intel_crtc *crtc)
8200 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8201 const struct intel_crtc_state *new_crtc_state =
8202 intel_atomic_get_new_crtc_state(state, crtc);
8204 if (!intel_crtc_needs_modeset(new_crtc_state))
8207 intel_crtc_update_active_timings(new_crtc_state);
/* Platform-specific enable hook (set up in display init). */
8209 dev_priv->display->crtc_enable(state, crtc);
8211 if (new_crtc_state->bigjoiner_slave)
/* vblanks work again, re-enable pipe CRC. */
8215 intel_crtc_enable_pipe_crc(crtc);
/*
 * Perform the per-CRTC plane/pipe update: LUT preload, pre-plane work,
 * plane updates under vblank evasion, then post-plane work.
 */
8218 static void intel_update_crtc(struct intel_atomic_state *state,
8219 struct intel_crtc *crtc)
8221 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8222 const struct intel_crtc_state *old_crtc_state =
8223 intel_atomic_get_old_crtc_state(state, crtc);
8224 struct intel_crtc_state *new_crtc_state =
8225 intel_atomic_get_new_crtc_state(state, crtc);
8226 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/* Preload the LUTs before the critical vblank-evasion section. */
8229 if (new_crtc_state->preload_luts &&
8230 (new_crtc_state->uapi.color_mgmt_changed ||
8231 new_crtc_state->update_pipe))
8232 intel_color_load_luts(new_crtc_state);
8234 intel_pre_plane_update(state, crtc);
8236 if (new_crtc_state->update_pipe)
8237 intel_encoders_update_pipe(state, crtc);
8240 intel_fbc_update(state, crtc);
8242 intel_update_planes_on_crtc(state, crtc);
/* Perform vblank evasion around commit operation */
8245 intel_pipe_update_start(new_crtc_state);
8247 commit_pipe_pre_planes(state, crtc);
8249 if (DISPLAY_VER(dev_priv) >= 9)
8250 skl_arm_planes_on_crtc(state, crtc);
8252 i9xx_arm_planes_on_crtc(state, crtc);
8254 commit_pipe_post_planes(state, crtc);
8256 intel_pipe_update_end(new_crtc_state);
/*
 * We usually enable FIFO underrun interrupts as part of the
 * CRTC enable sequence during modesets. But when we inherit a
 * valid pipe configuration from the BIOS we need to take care
 * of enabling them on the CRTC's first fastset.
 */
8264 if (new_crtc_state->update_pipe && !modeset &&
8265 old_crtc_state->inherited)
8266 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * Disable a CRTC that is being turned off (or fully modeset): stop pipe
 * CRC, run the platform disable hook, tear down FBC and the shared DPLL,
 * and reprogram watermarks on non-GMCH platforms.
 */
8269 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
8270 struct intel_crtc_state *old_crtc_state,
8271 struct intel_crtc_state *new_crtc_state,
8272 struct intel_crtc *crtc)
8274 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
/*
 * We need to disable pipe CRC before disabling the pipe,
 * or we race against vblank off.
 */
8280 intel_crtc_disable_pipe_crc(crtc);
8282 dev_priv->display->crtc_disable(state, crtc);
8283 crtc->active = false;
8284 intel_fbc_disable(crtc);
8285 intel_disable_shared_dpll(old_crtc_state);
/* FIXME unify this for all platforms */
8288 if (!new_crtc_state->hw.active &&
8289 !HAS_GMCH(dev_priv))
8290 intel_initial_watermarks(state, crtc);
/*
 * Disable every CRTC that needs a modeset, in dependency order:
 * planes first, then non-slave CRTCs, then whatever is left (port sync /
 * MST / bigjoiner slaves handled before their masters).
 */
8293 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
8295 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8296 struct intel_crtc *crtc;
/* Pass 1: disable the planes on all modeset CRTCs. */
8300 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8301 new_crtc_state, i) {
8302 if (!intel_crtc_needs_modeset(new_crtc_state))
8305 if (!old_crtc_state->hw.active)
8308 intel_pre_plane_update(state, crtc);
8309 intel_crtc_disable_planes(state, crtc);
/* Only disable port sync and MST slaves */
8313 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8314 new_crtc_state, i) {
8315 if (!intel_crtc_needs_modeset(new_crtc_state))
8318 if (!old_crtc_state->hw.active)
/* In case of Transcoder port Sync master slave CRTCs can be
 * assigned in any order and we need to make sure that
 * slave CRTCs are disabled first and then master CRTC since
 * Slave vblanks are masked till Master Vblanks.
 */
8326 if (!is_trans_port_sync_slave(old_crtc_state) &&
8327 !intel_dp_mst_is_slave_trans(old_crtc_state) &&
8328 !old_crtc_state->bigjoiner_slave)
8331 intel_old_crtc_state_disables(state, old_crtc_state,
8332 new_crtc_state, crtc);
/* Track which pipes were disabled in this pass. */
8333 handled |= BIT(crtc->pipe);
/* Disable everything else left on */
8337 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8338 new_crtc_state, i) {
8339 if (!intel_crtc_needs_modeset(new_crtc_state) ||
8340 (handled & BIT(crtc->pipe)))
8343 if (!old_crtc_state->hw.active)
8346 intel_old_crtc_state_disables(state, old_crtc_state,
8347 new_crtc_state, crtc);
/*
 * Generic (pre-skl) enable path: enable and update every CRTC that will
 * be active in the new state, in state order.
 */
8351 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8353 struct intel_crtc_state *new_crtc_state;
8354 struct intel_crtc *crtc;
8357 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8358 if (!new_crtc_state->hw.active)
8361 intel_enable_crtc(state, crtc);
8362 intel_update_crtc(state, crtc);
/*
 * skl+ enable path: CRTC updates must be ordered so the per-pipe DDB
 * (data buffer) allocations never transiently overlap, otherwise we risk
 * pipe underruns. Fastset pipes go first, then modeset pipes by
 * dependency, then the plane updates for newly enabled pipes.
 */
8366 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
8368 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8369 struct intel_crtc *crtc;
8370 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8371 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
8372 u8 update_pipes = 0, modeset_pipes = 0;
8375 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8376 enum pipe pipe = crtc->pipe;
8378 if (!new_crtc_state->hw.active)
/* ignore allocations for crtc's that have been turned off. */
8382 if (!intel_crtc_needs_modeset(new_crtc_state)) {
8383 entries[pipe] = old_crtc_state->wm.skl.ddb;
8384 update_pipes |= BIT(pipe);
8386 modeset_pipes |= BIT(pipe);
/*
 * Whenever the number of active pipes changes, we need to make sure we
 * update the pipes in the right order so that their ddb allocations
 * never overlap with each other between CRTC updates. Otherwise we'll
 * cause pipe underruns and other bad stuff.
 *
 * So first lets enable all pipes that do not need a fullmodeset as
 * those don't have any external dependency.
 */
8399 while (update_pipes) {
8400 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8401 new_crtc_state, i) {
8402 enum pipe pipe = crtc->pipe;
8404 if ((update_pipes & BIT(pipe)) == 0)
/* Defer this pipe while its new DDB overlaps another pipe's. */
8407 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8408 entries, I915_MAX_PIPES, pipe))
8411 entries[pipe] = new_crtc_state->wm.skl.ddb;
8412 update_pipes &= ~BIT(pipe);
8414 intel_update_crtc(state, crtc);
/*
 * If this is an already active pipe, its DDB changed,
 * and this isn't the last pipe that needs updating
 * then we need to wait for a vblank to pass for the
 * new ddb allocation to take effect.
 */
8422 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
8423 &old_crtc_state->wm.skl.ddb) &&
8424 (update_pipes | modeset_pipes))
8425 intel_crtc_wait_for_next_vblank(crtc);
8429 update_pipes = modeset_pipes;
/*
 * Enable all pipes that need a modeset and do not depend on other
 * pipes.
 */
8435 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8436 enum pipe pipe = crtc->pipe;
8438 if ((modeset_pipes & BIT(pipe)) == 0)
/* Skip pipes that depend on another pipe being enabled first. */
8441 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
8442 is_trans_port_sync_master(new_crtc_state) ||
8443 (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
8446 modeset_pipes &= ~BIT(pipe);
8448 intel_enable_crtc(state, crtc);
/*
 * Then we enable all remaining pipes that depend on other
 * pipes: MST slaves and port sync masters, big joiner master
 */
8455 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8456 enum pipe pipe = crtc->pipe;
8458 if ((modeset_pipes & BIT(pipe)) == 0)
8461 modeset_pipes &= ~BIT(pipe);
8463 intel_enable_crtc(state, crtc);
/*
 * Finally we do the plane updates/etc. for all pipes that got enabled.
 */
8469 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8470 enum pipe pipe = crtc->pipe;
8472 if ((update_pipes & BIT(pipe)) == 0)
8475 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8476 entries, I915_MAX_PIPES, pipe));
8478 entries[pipe] = new_crtc_state->wm.skl.ddb;
8479 update_pipes &= ~BIT(pipe);
8481 intel_update_crtc(state, crtc);
/* Every pipe should have been handled by one of the passes above. */
8484 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
8485 drm_WARN_ON(&dev_priv->drm, update_pipes);
/*
 * Drop the reference on every atomic state queued on the lock-free
 * free_list (populated from the sw_fence release callback).
 */
8488 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8490 struct intel_atomic_state *state, *next;
8491 struct llist_node *freed;
8493 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8494 llist_for_each_entry_safe(state, next, freed, freed)
8495 drm_atomic_state_put(&state->base);
/* Work item wrapper: free queued atomic states from process context. */
8498 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8500 struct drm_i915_private *dev_priv =
8501 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8503 intel_atomic_helper_free_state(dev_priv);
/*
 * Block until the commit's sw_fence signals, while also waking up if a
 * GPU reset requiring modeset intervention (I915_RESET_MODESET) begins —
 * waiting on both queues avoids deadlocking against the reset path.
 */
8506 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
8508 struct wait_queue_entry wait_fence, wait_reset;
8509 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
8511 init_wait_entry(&wait_fence, 0);
8512 init_wait_entry(&wait_reset, 0);
8514 prepare_to_wait(&intel_state->commit_ready.wait,
8515 &wait_fence, TASK_UNINTERRUPTIBLE);
8516 prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8517 I915_RESET_MODESET),
8518 &wait_reset, TASK_UNINTERRUPTIBLE);
/* Stop waiting once the fence is done or a modeset reset is pending. */
8521 if (i915_sw_fence_done(&intel_state->commit_ready) ||
8522 test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
8527 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
8528 finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8529 I915_RESET_MODESET),
/* Release the DSB buffers held by the old CRTC states in @state. */
8533 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
8535 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8536 struct intel_crtc *crtc;
8539 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8541 intel_dsb_cleanup(old_crtc_state);
/*
 * Deferred cleanup after a commit: free DSBs, clean up planes, mark the
 * commit done, drop the state reference, and reap any queued free states.
 */
8544 static void intel_atomic_cleanup_work(struct work_struct *work)
8546 struct intel_atomic_state *state =
8547 container_of(work, struct intel_atomic_state, base.commit_work);
8548 struct drm_i915_private *i915 = to_i915(state->base.dev);
8550 intel_cleanup_dsbs(state);
8551 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
8552 drm_atomic_helper_commit_cleanup_done(&state->base);
8553 drm_atomic_state_put(&state->base);
8555 intel_atomic_helper_free_state(i915);
/*
 * Read the cached fast-clear color value out of each plane's fb object
 * (the CCS clear-color plane) into plane_state->ccval before the commit
 * programs the plane registers.
 */
8558 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
8560 struct drm_i915_private *i915 = to_i915(state->base.dev);
8561 struct intel_plane *plane;
8562 struct intel_plane_state *plane_state;
8565 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8566 struct drm_framebuffer *fb = plane_state->hw.fb;
/* Only framebuffers with a clear-color CCS plane are relevant. */
8573 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
/*
 * The layout of the fast clear color value expected by HW
 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
 * - 4 x 4 bytes per-channel value
 * (in surface type specific float/int format provided by the fb user)
 * - 8 bytes native color value used by the display
 * (converted/written by GPU during a fast clear operation using the
 * above per-channel values)
 *
 * The commit's FB prepare hook already ensured that FB obj is pinned and the
 * caller made sure that the object is synced wrt. the related color clear value
 * GPU write on it.
 */
8590 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
8591 fb->offsets[cc_plane] + 16,
8592 &plane_state->ccval,
8593 sizeof(plane_state->ccval));
/* The above could only fail if the FB obj has an unexpected backing store type. */
8595 drm_WARN_ON(&i915->drm, ret);
/*
 * The heart of the atomic commit: runs after dependencies are resolved,
 * performs disables, enables, plane updates, watermark optimization and
 * verification, then hands cleanup off to a high-priority worker.
 */
8599 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
8601 struct drm_device *dev = state->base.dev;
8602 struct drm_i915_private *dev_priv = to_i915(dev);
8603 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8604 struct intel_crtc *crtc;
8605 u64 put_domains[I915_MAX_PIPES] = {};
8606 intel_wakeref_t wakeref = 0;
8609 intel_atomic_commit_fence_wait(state);
8611 drm_atomic_helper_wait_for_dependencies(&state->base);
8614 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
8616 intel_atomic_prepare_plane_clear_colors(state);
/* Grab the power domains each updated CRTC needs for the duration. */
8618 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8619 new_crtc_state, i) {
8620 if (intel_crtc_needs_modeset(new_crtc_state) ||
8621 new_crtc_state->update_pipe) {
8623 put_domains[crtc->pipe] =
8624 modeset_get_crtc_power_domains(new_crtc_state);
8628 intel_commit_modeset_disables(state);
/* FIXME: Eventually get rid of our crtc->config pointer */
8631 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8632 crtc->config = new_crtc_state;
8634 if (state->modeset) {
8635 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
8637 intel_set_cdclk_pre_plane_update(state);
8639 intel_modeset_verify_disabled(dev_priv, state);
8642 intel_sagv_pre_plane_update(state);
/* Complete the events for pipes that have now been disabled */
8645 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8646 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/* Complete events for now-disabled pipes here. */
8649 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
8650 spin_lock_irq(&dev->event_lock);
8651 drm_crtc_send_vblank_event(&crtc->base,
8652 new_crtc_state->uapi.event);
8653 spin_unlock_irq(&dev->event_lock);
8655 new_crtc_state->uapi.event = NULL;
8659 intel_encoders_update_prepare(state);
8661 intel_dbuf_pre_plane_update(state);
/* Arm the flip-done interrupt before the async surface writes land. */
8663 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8664 if (new_crtc_state->uapi.async_flip)
8665 intel_crtc_enable_flip_done(state, crtc);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
8669 dev_priv->display->commit_modeset_enables(state);
8671 intel_encoders_update_complete(state);
8674 intel_set_cdclk_post_plane_update(state);
8676 intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
 * already, but still need the state for the delayed optimization. To
 * fix this:
 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
 * - schedule that vblank worker _before_ calling hw_done
 * - at the start of commit_tail, cancel it _synchronously
 * - switch over to the vblank wait helper in the core after that since
 * we don't need our special handling any more.
 */
8687 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
8689 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8690 if (new_crtc_state->uapi.async_flip)
8691 intel_crtc_disable_flip_done(state, crtc);
/*
 * Now that the vblank has passed, we can go ahead and program the
 * optimal watermarks on platforms that need two-step watermark
 * programming.
 *
 * TODO: Move this (and other cleanup) to an async worker eventually.
 */
8701 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8702 new_crtc_state, i) {
/*
 * Gen2 reports pipe underruns whenever all planes are disabled.
 * So re-enable underrun reporting after some planes get enabled.
 *
 * We do this before .optimize_watermarks() so that we have a
 * chance of catching underruns with the intermediate watermarks
 * vs. the new plane configuration.
 */
8711 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
8712 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8714 intel_optimize_watermarks(state, crtc);
8717 intel_dbuf_post_plane_update(state);
8718 intel_psr_post_plane_update(state);
8720 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8721 intel_post_plane_update(state, crtc);
8723 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
8725 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
/*
 * DSB cleanup is done in cleanup_work aligning with framebuffer
 * cleanup. So copy and reset the dsb structure to sync with
 * commit_done and later do dsb cleanup in cleanup_work.
 */
8732 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
/* Underruns don't always raise interrupts, so check manually */
8736 intel_check_cpu_fifo_underruns(dev_priv);
8737 intel_check_pch_fifo_underruns(dev_priv);
8740 intel_verify_planes(state);
8742 intel_sagv_post_plane_update(state);
8744 drm_atomic_helper_commit_hw_done(&state->base);
8746 if (state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
 * likelihood of triggering bugs in unclaimed access. After we
 * finish modesetting, see if an error has been flagged, and if
 * so enable debugging for the next modeset - and hope we catch
 * the culprit.
 */
8753 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
8754 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
8756 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
 * Defer the cleanup of the old state to a separate worker to not
 * impede the current task (userspace for blocking modesets) that
 * are executed inline. For out-of-line asynchronous modesets/flips,
 * deferring to a new worker seems overkill, but we would place a
 * schedule point (cond_resched()) here anyway to keep latencies
 * down.
 */
8766 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
8767 queue_work(system_highpri_wq, &state->base.commit_work);
/* Work item wrapper: run the commit tail for a nonblocking commit. */
8770 static void intel_atomic_commit_work(struct work_struct *work)
8772 struct intel_atomic_state *state =
8773 container_of(work, struct intel_atomic_state, base.commit_work);
8775 intel_atomic_commit_tail(state);
/*
 * sw_fence notify callback for the commit_ready fence. On release,
 * queue the state on the lock-free free_list so its final reference is
 * dropped from the free-state worker rather than from fence context.
 */
8779 intel_atomic_commit_ready(struct i915_sw_fence *fence,
8780 enum i915_sw_fence_notify notify)
8782 struct intel_atomic_state *state =
8783 container_of(fence, struct intel_atomic_state, commit_ready);
8786 case FENCE_COMPLETE:
/* we do blocking waits in the worker, nothing to do here */
8791 struct intel_atomic_helper *helper =
8792 &to_i915(state->base.dev)->atomic_helper;
/* Schedule the worker only when the list transitions from empty. */
8794 if (llist_add(&state->freed, &helper->free_list))
8795 schedule_work(&helper->free_work);
/*
 * Move frontbuffer tracking bits from each plane's old fb to its new fb
 * so frontbuffer invalidation/flush keeps working across the commit.
 */
8803 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
8805 struct intel_plane_state *old_plane_state, *new_plane_state;
8806 struct intel_plane *plane;
8809 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
8811 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
8812 to_intel_frontbuffer(new_plane_state->hw.fb),
8813 plane->frontbuffer_bit);
/*
 * drm_mode_config_funcs.atomic_commit implementation: set up the commit
 * fence, swap state, and either queue the tail on a workqueue (nonblock)
 * or run it inline.
 */
8816 static int intel_atomic_commit(struct drm_device *dev,
8817 struct drm_atomic_state *_state,
8820 struct intel_atomic_state *state = to_intel_atomic_state(_state);
8821 struct drm_i915_private *dev_priv = to_i915(dev);
/* Hold a runtime PM reference for the whole commit. */
8824 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
8826 drm_atomic_state_get(&state->base);
8827 i915_sw_fence_init(&state->commit_ready,
8828 intel_atomic_commit_ready);
/*
 * The intel_legacy_cursor_update() fast path takes care
 * of avoiding the vblank waits for simple cursor
 * movement and flips. For cursor on/off and size changes,
 * we want to perform the vblank waits so that watermark
 * updates happen during the correct frames. Gen9+ have
 * double buffered watermarks and so shouldn't need this.
 *
 * Unset state->legacy_cursor_update before the call to
 * drm_atomic_helper_setup_commit() because otherwise
 * drm_atomic_helper_wait_for_flip_done() is a noop and
 * we get FIFO underruns because we didn't wait
 * for vblank.
 *
 * FIXME doing watermarks and fb cleanup from a vblank worker
 * (assuming we had any) would solve these problems.
 */
8847 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
8848 struct intel_crtc_state *new_crtc_state;
8849 struct intel_crtc *crtc;
8852 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8853 if (new_crtc_state->wm.need_postvbl_update ||
8854 new_crtc_state->update_wm_post)
8855 state->base.legacy_cursor_update = false;
8858 ret = intel_atomic_prepare_commit(state);
8860 drm_dbg_atomic(&dev_priv->drm,
8861 "Preparing state failed with %i\n", ret);
8862 i915_sw_fence_commit(&state->commit_ready);
8863 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8867 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
8869 ret = drm_atomic_helper_swap_state(&state->base, true);
8871 intel_atomic_swap_global_state(state);
/* Error path: release the fence, DSBs, planes and the PM reference. */
8874 struct intel_crtc_state *new_crtc_state;
8875 struct intel_crtc *crtc;
8878 i915_sw_fence_commit(&state->commit_ready);
8880 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8881 intel_dsb_cleanup(new_crtc_state);
8883 drm_atomic_helper_cleanup_planes(dev, &state->base);
8884 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8887 intel_shared_dpll_swap_state(state);
8888 intel_atomic_track_fbs(state);
8890 drm_atomic_state_get(&state->base);
8891 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
8893 i915_sw_fence_commit(&state->commit_ready);
/* Modeset commits go on the modeset wq, plain flips on the flip wq. */
8894 if (nonblock && state->modeset) {
8895 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
8896 } else if (nonblock) {
8897 queue_work(dev_priv->flip_wq, &state->base.commit_work);
/* Blocking path: flush pending modesets, then run the tail inline. */
8900 flush_workqueue(dev_priv->modeset_wq);
8901 intel_atomic_commit_tail(state);
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
8920 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
8922 struct intel_plane *plane;
8924 for_each_intel_plane(&dev_priv->drm, plane) {
8925 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
8928 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
8933 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
8934 struct drm_file *file)
8936 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8937 struct drm_crtc *drmmode_crtc;
8938 struct intel_crtc *crtc;
8940 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
8944 crtc = to_intel_crtc(drmmode_crtc);
8945 pipe_from_crtc_id->pipe = crtc->pipe;
8950 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8952 struct drm_device *dev = encoder->base.dev;
8953 struct intel_encoder *source_encoder;
8954 u32 possible_clones = 0;
8956 for_each_intel_encoder(dev, source_encoder) {
8957 if (encoders_cloneable(encoder, source_encoder))
8958 possible_clones |= drm_encoder_mask(&source_encoder->base);
8961 return possible_clones;
8964 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
8966 struct drm_device *dev = encoder->base.dev;
8967 struct intel_crtc *crtc;
8968 u32 possible_crtcs = 0;
8970 for_each_intel_crtc(dev, crtc) {
8971 if (encoder->pipe_mask & BIT(crtc->pipe))
8972 possible_crtcs |= drm_crtc_mask(&crtc->base);
8975 return possible_crtcs;
8978 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
8980 if (!IS_MOBILE(dev_priv))
8983 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
8986 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
8992 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
8994 if (DISPLAY_VER(dev_priv) >= 9)
8997 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
9000 if (HAS_PCH_LPT_H(dev_priv) &&
9001 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
9004 /* DDI E can't be used if DDI A requires 4 lanes */
9005 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
9008 if (!dev_priv->vbt.int_crt_support)
/*
 * intel_setup_outputs - register all display output encoders for this device.
 *
 * One branch per platform family; each registers the DDI/DP/HDMI/SDVO/LVDS/
 * CRT/TV/DSI encoders that the hardware (straps, fuses, VBT) says exist.
 * Finally fills in each encoder's possible_crtcs/possible_clones masks.
 */
9014 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
9016 struct intel_encoder *encoder;
9017 bool dpd_is_edp = false;
9019 intel_pps_unlock_regs_wa(dev_priv);
9021 if (!HAS_DISPLAY(dev_priv))
/* DG2: fixed set of DDIs, port D uses the XE_LPD numbering. */
9024 if (IS_DG2(dev_priv)) {
9025 intel_ddi_init(dev_priv, PORT_A);
9026 intel_ddi_init(dev_priv, PORT_B);
9027 intel_ddi_init(dev_priv, PORT_C);
9028 intel_ddi_init(dev_priv, PORT_D_XELPD);
9029 } else if (IS_ALDERLAKE_P(dev_priv)) {
9030 intel_ddi_init(dev_priv, PORT_A);
9031 intel_ddi_init(dev_priv, PORT_B);
9032 intel_ddi_init(dev_priv, PORT_TC1);
9033 intel_ddi_init(dev_priv, PORT_TC2);
9034 intel_ddi_init(dev_priv, PORT_TC3);
9035 intel_ddi_init(dev_priv, PORT_TC4);
9036 icl_dsi_init(dev_priv);
9037 } else if (IS_ALDERLAKE_S(dev_priv)) {
9038 intel_ddi_init(dev_priv, PORT_A);
9039 intel_ddi_init(dev_priv, PORT_TC1);
9040 intel_ddi_init(dev_priv, PORT_TC2);
9041 intel_ddi_init(dev_priv, PORT_TC3);
9042 intel_ddi_init(dev_priv, PORT_TC4);
9043 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
9044 intel_ddi_init(dev_priv, PORT_A);
9045 intel_ddi_init(dev_priv, PORT_B);
9046 intel_ddi_init(dev_priv, PORT_TC1);
9047 intel_ddi_init(dev_priv, PORT_TC2);
/* Generic gen12 (TGL): two combo + six Type-C ports, plus DSI. */
9048 } else if (DISPLAY_VER(dev_priv) >= 12) {
9049 intel_ddi_init(dev_priv, PORT_A);
9050 intel_ddi_init(dev_priv, PORT_B);
9051 intel_ddi_init(dev_priv, PORT_TC1);
9052 intel_ddi_init(dev_priv, PORT_TC2);
9053 intel_ddi_init(dev_priv, PORT_TC3);
9054 intel_ddi_init(dev_priv, PORT_TC4);
9055 intel_ddi_init(dev_priv, PORT_TC5);
9056 intel_ddi_init(dev_priv, PORT_TC6);
9057 icl_dsi_init(dev_priv);
9058 } else if (IS_JSL_EHL(dev_priv)) {
9059 intel_ddi_init(dev_priv, PORT_A);
9060 intel_ddi_init(dev_priv, PORT_B);
9061 intel_ddi_init(dev_priv, PORT_C);
9062 intel_ddi_init(dev_priv, PORT_D);
9063 icl_dsi_init(dev_priv);
9064 } else if (DISPLAY_VER(dev_priv) == 11) {
9065 intel_ddi_init(dev_priv, PORT_A);
9066 intel_ddi_init(dev_priv, PORT_B);
9067 intel_ddi_init(dev_priv, PORT_C);
9068 intel_ddi_init(dev_priv, PORT_D);
9069 intel_ddi_init(dev_priv, PORT_E);
9070 intel_ddi_init(dev_priv, PORT_F);
9071 icl_dsi_init(dev_priv);
9072 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
9073 intel_ddi_init(dev_priv, PORT_A);
9074 intel_ddi_init(dev_priv, PORT_B);
9075 intel_ddi_init(dev_priv, PORT_C);
9076 vlv_dsi_init(dev_priv);
9077 } else if (DISPLAY_VER(dev_priv) >= 9) {
9078 intel_ddi_init(dev_priv, PORT_A);
9079 intel_ddi_init(dev_priv, PORT_B);
9080 intel_ddi_init(dev_priv, PORT_C);
9081 intel_ddi_init(dev_priv, PORT_D);
9082 intel_ddi_init(dev_priv, PORT_E);
/* HSW/BDW: probe DDIs via straps/fuses rather than registering blindly. */
9083 } else if (HAS_DDI(dev_priv)) {
9086 if (intel_ddi_crt_present(dev_priv))
9087 intel_crt_init(dev_priv);
9089 /* Haswell uses DDI functions to detect digital outputs. */
9090 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
9092 intel_ddi_init(dev_priv, PORT_A);
9094 found = intel_de_read(dev_priv, SFUSE_STRAP);
9095 if (found & SFUSE_STRAP_DDIB_DETECTED)
9096 intel_ddi_init(dev_priv, PORT_B);
9097 if (found & SFUSE_STRAP_DDIC_DETECTED)
9098 intel_ddi_init(dev_priv, PORT_C);
9099 if (found & SFUSE_STRAP_DDID_DETECTED)
9100 intel_ddi_init(dev_priv, PORT_D);
9101 if (found & SFUSE_STRAP_DDIF_DETECTED)
9102 intel_ddi_init(dev_priv, PORT_F);
/* ILK/SNB/IVB with a PCH: LVDS + CRT first, then strapped DP/HDMI/SDVO. */
9103 } else if (HAS_PCH_SPLIT(dev_priv)) {
9107 * intel_edp_init_connector() depends on this completing first,
9108 * to prevent the registration of both eDP and LVDS and the
9109 * incorrect sharing of the PPS.
9111 intel_lvds_init(dev_priv);
9112 intel_crt_init(dev_priv);
9114 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
9116 if (ilk_has_edp_a(dev_priv))
9117 g4x_dp_init(dev_priv, DP_A, PORT_A);
9119 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
9120 /* PCH SDVOB multiplex with HDMIB */
9121 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
9123 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
9124 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
9125 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
9128 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
9129 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
9131 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
9132 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
9134 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
9135 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
9137 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
9138 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
9139 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
9140 bool has_edp, has_port;
9142 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
9143 intel_crt_init(dev_priv);
9146 * The DP_DETECTED bit is the latched state of the DDC
9147 * SDA pin at boot. However since eDP doesn't require DDC
9148 * (no way to plug in a DP->HDMI dongle) the DDC pins for
9149 * eDP ports may have been muxed to an alternate function.
9150 * Thus we can't rely on the DP_DETECTED bit alone to detect
9151 * eDP ports. Consult the VBT as well as DP_DETECTED to
9154 * Sadly the straps seem to be missing sometimes even for HDMI
9155 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
9156 * and VBT for the presence of the port. Additionally we can't
9157 * trust the port type the VBT declares as we've seen at least
9158 * HDMI ports that the VBT claim are DP or eDP.
9160 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
9161 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
9162 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
9163 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
9164 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
9165 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
9167 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
9168 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
9169 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
9170 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
9171 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
9172 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
9174 if (IS_CHERRYVIEW(dev_priv)) {
9176 * eDP not supported on port D,
9177 * so no need to worry about it
9179 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
9180 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
9181 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
9182 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
9183 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
9186 vlv_dsi_init(dev_priv);
9187 } else if (IS_PINEVIEW(dev_priv)) {
9188 intel_lvds_init(dev_priv);
9189 intel_crt_init(dev_priv);
/* Gen3/gen4: SDVO/HDMI/DP share detect straps; probe in legacy order. */
9190 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
9193 if (IS_MOBILE(dev_priv))
9194 intel_lvds_init(dev_priv);
9196 intel_crt_init(dev_priv);
9198 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9199 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
9200 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
9201 if (!found && IS_G4X(dev_priv)) {
9202 drm_dbg_kms(&dev_priv->drm,
9203 "probing HDMI on SDVOB\n");
9204 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
9207 if (!found && IS_G4X(dev_priv))
9208 g4x_dp_init(dev_priv, DP_B, PORT_B);
9211 /* Before G4X SDVOC doesn't have its own detect register */
9213 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9214 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
9215 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
9218 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
9220 if (IS_G4X(dev_priv)) {
9221 drm_dbg_kms(&dev_priv->drm,
9222 "probing HDMI on SDVOC\n");
9223 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
9225 if (IS_G4X(dev_priv))
9226 g4x_dp_init(dev_priv, DP_C, PORT_C);
9229 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
9230 g4x_dp_init(dev_priv, DP_D, PORT_D);
9232 if (SUPPORTS_TV(dev_priv))
9233 intel_tv_init(dev_priv);
9234 } else if (DISPLAY_VER(dev_priv) == 2) {
9235 if (IS_I85X(dev_priv))
9236 intel_lvds_init(dev_priv);
9238 intel_crt_init(dev_priv);
9239 intel_dvo_init(dev_priv);
/* All encoders registered: compute possible CRTC/clone masks for each. */
9242 for_each_intel_encoder(&dev_priv->drm, encoder) {
9243 encoder->base.possible_crtcs =
9244 intel_encoder_possible_crtcs(encoder);
9245 encoder->base.possible_clones =
9246 intel_encoder_possible_clones(encoder);
9249 intel_init_pch_refclk(dev_priv);
9251 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
/*
 * intel_mode_valid - global (per-device) display mode validation.
 *
 * Rejects mode flags the hardware never supports and modes whose timings
 * exceed the transcoder limits for this display generation. Per-connector
 * limits are enforced separately in the connectors' mode_valid hooks.
 */
9254 static enum drm_mode_status
9255 intel_mode_valid(struct drm_device *dev,
9256 const struct drm_display_mode *mode)
9258 struct drm_i915_private *dev_priv = to_i915(dev);
9259 int hdisplay_max, htotal_max;
9260 int vdisplay_max, vtotal_max;
9263 * Can't reject DBLSCAN here because Xorg ddxen can add piles
9264 * of DBLSCAN modes to the output's mode list when they detect
9265 * the scaling mode property on the connector. And they don't
9266 * ask the kernel to validate those modes in any way until
9267 * modeset time at which point the client gets a protocol error.
9268 * So in order to not upset those clients we silently ignore the
9269 * DBLSCAN flag on such connectors. For other connectors we will
9270 * reject modes with the DBLSCAN flag in encoder->compute_config().
9271 * And we always reject DBLSCAN modes in connector->mode_valid()
9272 * as we never want such modes on the connector's mode list.
/* Unsupported mode flags, rejected regardless of platform. */
9275 if (mode->vscan > 1)
9276 return MODE_NO_VSCAN;
9278 if (mode->flags & DRM_MODE_FLAG_HSKEW)
9279 return MODE_H_ILLEGAL;
9281 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
9282 DRM_MODE_FLAG_NCSYNC |
9283 DRM_MODE_FLAG_PCSYNC))
9286 if (mode->flags & (DRM_MODE_FLAG_BCAST |
9287 DRM_MODE_FLAG_PIXMUX |
9288 DRM_MODE_FLAG_CLKDIV2))
9291 /* Transcoder timing limits */
9292 if (DISPLAY_VER(dev_priv) >= 11) {
9293 hdisplay_max = 16384;
9294 vdisplay_max = 8192;
9297 } else if (DISPLAY_VER(dev_priv) >= 9 ||
9298 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
9299 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
9300 vdisplay_max = 4096;
9303 } else if (DISPLAY_VER(dev_priv) >= 3) {
9304 hdisplay_max = 4096;
9305 vdisplay_max = 4096;
9309 hdisplay_max = 2048;
9310 vdisplay_max = 2048;
/* All horizontal and vertical timing fields must fit the limits above. */
9315 if (mode->hdisplay > hdisplay_max ||
9316 mode->hsync_start > htotal_max ||
9317 mode->hsync_end > htotal_max ||
9318 mode->htotal > htotal_max)
9319 return MODE_H_ILLEGAL;
9321 if (mode->vdisplay > vdisplay_max ||
9322 mode->vsync_start > vtotal_max ||
9323 mode->vsync_end > vtotal_max ||
9324 mode->vtotal > vtotal_max)
9325 return MODE_V_ILLEGAL;
/* Minimum active size / blanking requirements differ below gen5. */
9327 if (DISPLAY_VER(dev_priv) >= 5) {
9328 if (mode->hdisplay < 64 ||
9329 mode->htotal - mode->hdisplay < 32)
9330 return MODE_H_ILLEGAL;
9332 if (mode->vtotal - mode->vdisplay < 5)
9333 return MODE_V_ILLEGAL;
9335 if (mode->htotal - mode->hdisplay < 32)
9336 return MODE_H_ILLEGAL;
9338 if (mode->vtotal - mode->vdisplay < 3)
9339 return MODE_V_ILLEGAL;
9343 * Cantiga+ cannot handle modes with a hsync front porch of 0.
9344 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
9346 if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
9347 mode->hsync_start == mode->hdisplay)
9348 return MODE_H_ILLEGAL;
/*
 * intel_mode_valid_max_plane_size - reject modes larger than the biggest
 * plane the hardware can scan out, so every advertised mode can be covered
 * by a fullscreen plane.
 */
9353 enum drm_mode_status
9354 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9355 const struct drm_display_mode *mode,
9358 int plane_width_max, plane_height_max;
9361 * intel_mode_valid() should be
9362 * sufficient on older platforms.
9364 if (DISPLAY_VER(dev_priv) < 9)
9368 * Most people will probably want a fullscreen
9369 * plane so let's not advertize modes that are
/* Gen11+ doubles the max plane width when bigjoiner is usable. */
9372 if (DISPLAY_VER(dev_priv) >= 11) {
9373 plane_width_max = 5120 << bigjoiner;
9374 plane_height_max = 4320;
9376 plane_width_max = 5120;
9377 plane_height_max = 4096;
9380 if (mode->hdisplay > plane_width_max)
9381 return MODE_H_ILLEGAL;
9383 if (mode->vdisplay > plane_height_max)
9384 return MODE_V_ILLEGAL;
/*
 * drm_mode_config_funcs for i915: framebuffer creation, device-wide mode
 * validation, and the atomic check/commit/state-management entry points.
 */
9389 static const struct drm_mode_config_funcs intel_mode_funcs = {
9390 .fb_create = intel_user_framebuffer_create,
9391 .get_format_info = intel_fb_get_format_info,
9392 .output_poll_changed = intel_fbdev_output_poll_changed,
9393 .mode_valid = intel_mode_valid,
9394 .atomic_check = intel_atomic_check,
9395 .atomic_commit = intel_atomic_commit,
9396 .atomic_state_alloc = intel_atomic_state_alloc,
9397 .atomic_state_clear = intel_atomic_state_clear,
9398 .atomic_state_free = intel_atomic_state_free,
/* Display hooks for SKL+ (gen9 and newer). */
9401 static const struct drm_i915_display_funcs skl_display_funcs = {
9402 .get_pipe_config = hsw_get_pipe_config,
9403 .crtc_enable = hsw_crtc_enable,
9404 .crtc_disable = hsw_crtc_disable,
9405 .commit_modeset_enables = skl_commit_modeset_enables,
9406 .get_initial_plane_config = skl_get_initial_plane_config,
/* Display hooks for pre-gen9 DDI platforms (HSW/BDW). */
9409 static const struct drm_i915_display_funcs ddi_display_funcs = {
9410 .get_pipe_config = hsw_get_pipe_config,
9411 .crtc_enable = hsw_crtc_enable,
9412 .crtc_disable = hsw_crtc_disable,
9413 .commit_modeset_enables = intel_commit_modeset_enables,
9414 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for PCH-split platforms (ILK/SNB/IVB). */
9417 static const struct drm_i915_display_funcs pch_split_display_funcs = {
9418 .get_pipe_config = ilk_get_pipe_config,
9419 .crtc_enable = ilk_crtc_enable,
9420 .crtc_disable = ilk_crtc_disable,
9421 .commit_modeset_enables = intel_commit_modeset_enables,
9422 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for Valleyview/Cherryview. */
9425 static const struct drm_i915_display_funcs vlv_display_funcs = {
9426 .get_pipe_config = i9xx_get_pipe_config,
9427 .crtc_enable = valleyview_crtc_enable,
9428 .crtc_disable = i9xx_crtc_disable,
9429 .commit_modeset_enables = intel_commit_modeset_enables,
9430 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for the remaining legacy (i9xx-class) platforms. */
9433 static const struct drm_i915_display_funcs i9xx_display_funcs = {
9434 .get_pipe_config = i9xx_get_pipe_config,
9435 .crtc_enable = i9xx_crtc_enable,
9436 .crtc_disable = i9xx_crtc_disable,
9437 .commit_modeset_enables = intel_commit_modeset_enables,
9438 .get_initial_plane_config = i9xx_get_initial_plane_config,
9442 * intel_init_display_hooks - initialize the display modesetting hooks
9443 * @dev_priv: device private
9445 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
9447 if (!HAS_DISPLAY(dev_priv))
9450 intel_init_cdclk_hooks(dev_priv);
9451 intel_audio_hooks_init(dev_priv);
9453 intel_dpll_init_clock_hook(dev_priv);
/* Pick the display-funcs table for this platform, newest first. */
9455 if (DISPLAY_VER(dev_priv) >= 9) {
9456 dev_priv->display = &skl_display_funcs;
9457 } else if (HAS_DDI(dev_priv)) {
9458 dev_priv->display = &ddi_display_funcs;
9459 } else if (HAS_PCH_SPLIT(dev_priv)) {
9460 dev_priv->display = &pch_split_display_funcs;
9461 } else if (IS_CHERRYVIEW(dev_priv) ||
9462 IS_VALLEYVIEW(dev_priv)) {
9463 dev_priv->display = &vlv_display_funcs;
/* Everything older falls back to the generic i9xx hooks. */
9465 dev_priv->display = &i9xx_display_funcs;
9468 intel_fdi_init_hook(dev_priv);
/*
 * intel_modeset_init_hw - read out the current CDCLK configuration and seed
 * the software cdclk state with it.
 */
9471 void intel_modeset_init_hw(struct drm_i915_private *i915)
9473 struct intel_cdclk_state *cdclk_state;
9475 if (!HAS_DISPLAY(i915))
9478 cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
9480 intel_update_cdclk(i915);
9481 intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
/* Start software state from the hardware's current configuration. */
9482 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
/*
 * Pull every CRTC and plane into @state so the subsequent atomic check
 * recomputes watermarks for the whole device. Returns 0 or a -errno
 * propagated from state acquisition.
 */
9485 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
9487 struct drm_plane *plane;
9488 struct intel_crtc *crtc;
9490 for_each_intel_crtc(state->dev, crtc) {
9491 struct intel_crtc_state *crtc_state;
9493 crtc_state = intel_atomic_get_crtc_state(state, crtc);
9494 if (IS_ERR(crtc_state))
9495 return PTR_ERR(crtc_state);
9497 if (crtc_state->hw.active) {
9499 * Preserve the inherited flag to avoid
9500 * taking the full modeset path.
9502 crtc_state->inherited = true;
/* Also add all planes so plane watermark inputs are in the state. */
9506 drm_for_each_plane(plane, state->dev) {
9507 struct drm_plane_state *plane_state;
9509 plane_state = drm_atomic_get_plane_state(state, plane);
9510 if (IS_ERR(plane_state))
9511 return PTR_ERR(plane_state);
9518 * Calculate what we think the watermarks should be for the state we've read
9519 * out of the hardware and then immediately program those watermarks so that
9520 * we ensure the hardware settings match our internal state.
9522 * We can calculate what we think WM's should be by creating a duplicate of the
9523 * current state (which was constructed during hardware readout) and running it
9524 * through the atomic check code to calculate new watermark values in the
9527 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
9529 struct drm_atomic_state *state;
9530 struct intel_atomic_state *intel_state;
9531 struct intel_crtc *crtc;
9532 struct intel_crtc_state *crtc_state;
9533 struct drm_modeset_acquire_ctx ctx;
9537 /* Only supported on platforms that use atomic watermark design */
9538 if (!dev_priv->wm_disp->optimize_watermarks)
9541 state = drm_atomic_state_alloc(&dev_priv->drm);
9542 if (drm_WARN_ON(&dev_priv->drm, !state))
9545 intel_state = to_intel_atomic_state(state);
9547 drm_modeset_acquire_init(&ctx, 0);
9550 state->acquire_ctx = &ctx;
9553 * Hardware readout is the only time we don't want to calculate
9554 * intermediate watermarks (since we don't trust the current
9557 if (!HAS_GMCH(dev_priv))
9558 intel_state->skip_intermediate_wm = true;
9560 ret = sanitize_watermarks_add_affected(state);
/* Run the duplicated readout state through the atomic check machinery. */
9564 ret = intel_atomic_check(&dev_priv->drm, state);
9568 /* Write calculated watermark values back */
9569 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
9570 crtc_state->wm.need_postvbl_update = true;
9571 intel_optimize_watermarks(intel_state, crtc);
/* Keep the committed software copy in sync with what we programmed. */
9573 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
/* Standard drm_modeset deadlock backoff-and-retry dance. */
9577 if (ret == -EDEADLK) {
9578 drm_atomic_state_clear(state);
9579 drm_modeset_backoff(&ctx);
9584 * If we fail here, it means that the hardware appears to be
9585 * programmed in a way that shouldn't be possible, given our
9586 * understanding of watermark requirements. This might mean a
9587 * mistake in the hardware readout code or a mistake in the
9588 * watermark calculations for a given platform. Raise a WARN
9589 * so that this is noticeable.
9591 * If this actually happens, we'll have to just leave the
9592 * BIOS-programmed watermarks untouched and hope for the best.
9594 drm_WARN(&dev_priv->drm, ret,
9595 "Could not determine valid watermarks for inherited state\n");
9597 drm_atomic_state_put(state);
9599 drm_modeset_drop_locks(&ctx);
9600 drm_modeset_acquire_fini(&ctx);
/*
 * intel_initial_commit - commit the state read out from hardware at probe,
 * forcing active planes/CRTCs to recompute their state without a full
 * modeset. Returns 0 or a -errno from state acquisition/commit.
 */
9603 static int intel_initial_commit(struct drm_device *dev)
9605 struct drm_atomic_state *state = NULL;
9606 struct drm_modeset_acquire_ctx ctx;
9607 struct intel_crtc *crtc;
9610 state = drm_atomic_state_alloc(dev);
9614 drm_modeset_acquire_init(&ctx, 0);
9617 state->acquire_ctx = &ctx;
9619 for_each_intel_crtc(dev, crtc) {
9620 struct intel_crtc_state *crtc_state =
9621 intel_atomic_get_crtc_state(state, crtc);
9623 if (IS_ERR(crtc_state)) {
9624 ret = PTR_ERR(crtc_state);
9628 if (crtc_state->hw.active) {
9629 struct intel_encoder *encoder;
9632 * We've not yet detected sink capabilities
9633 * (audio,infoframes,etc.) and thus we don't want to
9634 * force a full state recomputation yet. We want that to
9635 * happen only for the first real commit from userspace.
9636 * So preserve the inherited flag for the time being.
9638 crtc_state->inherited = true;
9640 ret = drm_atomic_add_affected_planes(state, &crtc->base);
9645 * FIXME hack to force a LUT update to avoid the
9646 * plane update forcing the pipe gamma on without
9647 * having a proper LUT loaded. Remove once we
9648 * have readout for pipe gamma enable.
9650 crtc_state->uapi.color_mgmt_changed = true;
/* Encoders that fail the fastset check drag their connectors in too. */
9652 for_each_intel_encoder_mask(dev, encoder,
9653 crtc_state->uapi.encoder_mask) {
9654 if (encoder->initial_fastset_check &&
9655 !encoder->initial_fastset_check(encoder, crtc_state)) {
9656 ret = drm_atomic_add_affected_connectors(state,
9665 ret = drm_atomic_commit(state);
/* Standard drm_modeset deadlock backoff-and-retry dance. */
9668 if (ret == -EDEADLK) {
9669 drm_atomic_state_clear(state);
9670 drm_modeset_backoff(&ctx);
9674 drm_atomic_state_put(state);
9676 drm_modeset_drop_locks(&ctx);
9677 drm_modeset_acquire_fini(&ctx);
/*
 * intel_mode_config_init - set up the drm_mode_config limits and hooks:
 * framebuffer/cursor size limits per display generation, async flip
 * support, and the i915 mode_config funcs table.
 */
9682 static void intel_mode_config_init(struct drm_i915_private *i915)
9684 struct drm_mode_config *mode_config = &i915->drm.mode_config;
9686 drm_mode_config_init(&i915->drm);
9687 INIT_LIST_HEAD(&i915->global_obj_list);
9689 mode_config->min_width = 0;
9690 mode_config->min_height = 0;
9692 mode_config->preferred_depth = 24;
9693 mode_config->prefer_shadow = 1;
9695 mode_config->funcs = &intel_mode_funcs;
9697 mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
9700 * Maximum framebuffer dimensions, chosen to match
9701 * the maximum render engine surface size on gen4+.
9703 if (DISPLAY_VER(i915) >= 7) {
9704 mode_config->max_width = 16384;
9705 mode_config->max_height = 16384;
9706 } else if (DISPLAY_VER(i915) >= 4) {
9707 mode_config->max_width = 8192;
9708 mode_config->max_height = 8192;
9709 } else if (DISPLAY_VER(i915) == 3) {
9710 mode_config->max_width = 4096;
9711 mode_config->max_height = 4096;
9713 mode_config->max_width = 2048;
9714 mode_config->max_height = 2048;
/* Cursor size limits vary wildly on the oldest platforms. */
9717 if (IS_I845G(i915) || IS_I865G(i915)) {
9718 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
9719 mode_config->cursor_height = 1023;
9720 } else if (IS_I830(i915) || IS_I85X(i915) ||
9721 IS_I915G(i915) || IS_I915GM(i915)) {
9722 mode_config->cursor_width = 64;
9723 mode_config->cursor_height = 64;
9725 mode_config->cursor_width = 256;
9726 mode_config->cursor_height = 256;
/* Tear down the global atomic objects and the drm mode config. */
9730 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
9732 intel_atomic_global_obj_cleanup(i915);
9733 drm_mode_config_cleanup(&i915->drm);
9736 /* part #1: call before irq install */
9737 int intel_modeset_init_noirq(struct drm_i915_private *i915)
9741 if (i915_inject_probe_failure(i915))
9744 if (HAS_DISPLAY(i915)) {
9745 ret = drm_vblank_init(&i915->drm,
9746 INTEL_NUM_PIPES(i915));
9751 intel_bios_init(i915);
9753 ret = intel_vga_register(i915);
9757 /* FIXME: completely on the wrong abstraction layer */
9758 intel_power_domains_init_hw(i915, false);
9760 if (!HAS_DISPLAY(i915))
9763 intel_dmc_ucode_init(i915);
/* Ordered wq serializes modesets; high-prio wq handles page flips. */
9765 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
9766 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
9767 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
9769 i915->framestart_delay = 1; /* 1-4 */
9771 i915->window2_delay = 0; /* No DSB so no window2 delay */
9773 intel_mode_config_init(i915);
9775 ret = intel_cdclk_init(i915);
9777 goto cleanup_vga_client_pw_domain_dmc;
9779 ret = intel_dbuf_init(i915);
9781 goto cleanup_vga_client_pw_domain_dmc;
9783 ret = intel_bw_init(i915);
9785 goto cleanup_vga_client_pw_domain_dmc;
/* Deferred freeing of atomic states (see intel_atomic_commit_ready). */
9787 init_llist_head(&i915->atomic_helper.free_list);
9788 INIT_WORK(&i915->atomic_helper.free_work,
9789 intel_atomic_helper_free_state_worker);
9791 intel_init_quirks(i915);
9793 intel_fbc_init(i915);
/* Error path: unwind DMC, power domains, VGA and VBT in reverse order. */
9797 cleanup_vga_client_pw_domain_dmc:
9798 intel_dmc_ucode_fini(i915);
9799 intel_power_domains_driver_remove(i915);
9800 intel_vga_unregister(i915);
9802 intel_bios_driver_remove(i915);
9807 /* part #2: call after irq install, but before gem init */
9808 int intel_modeset_init_nogem(struct drm_i915_private *i915)
9810 struct drm_device *dev = &i915->drm;
9812 struct intel_crtc *crtc;
9815 if (!HAS_DISPLAY(i915))
9818 intel_init_pm(i915);
9820 intel_panel_sanitize_ssc(i915);
9822 intel_pps_setup(i915);
9824 intel_gmbus_setup(i915);
9826 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
9827 INTEL_NUM_PIPES(i915),
9828 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
/* Create one CRTC per hardware pipe. */
9830 for_each_pipe(i915, pipe) {
9831 ret = intel_crtc_init(i915, pipe);
9833 intel_mode_config_cleanup(i915);
9838 intel_plane_possible_crtcs_init(i915);
9839 intel_shared_dpll_init(dev);
9840 intel_fdi_pll_freq_update(i915);
9842 intel_update_czclk(i915);
9843 intel_modeset_init_hw(i915);
9844 intel_dpll_update_ref_clks(i915);
9846 intel_hdcp_component_init(i915);
9848 if (i915->max_cdclk_freq == 0)
9849 intel_update_max_cdclk(i915);
9852 * If the platform has HTI, we need to find out whether it has reserved
9853 * any display resources before we create our display outputs.
9855 if (INTEL_INFO(i915)->display.has_hti)
9856 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
9858 /* Just disable it once at startup */
9859 intel_vga_disable(i915);
9860 intel_setup_outputs(i915);
/* Read out the BIOS-programmed hardware state under the modeset locks. */
9862 drm_modeset_lock_all(dev);
9863 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
9864 intel_acpi_assign_connector_fwnodes(i915);
9865 drm_modeset_unlock_all(dev);
9867 for_each_intel_crtc(dev, crtc) {
9868 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
9870 intel_crtc_initial_plane_config(crtc);
9874 * Make sure hardware watermarks really match the state we read out.
9875 * Note that we need to do this after reconstructing the BIOS fb's
9876 * since the watermark calculation done here will use pstate->fb.
9878 if (!HAS_GMCH(i915))
9879 sanitize_watermarks(i915);
9884 /* part #3: call after gem init */
9885 int intel_modeset_init(struct drm_i915_private *i915)
9889 if (!HAS_DISPLAY(i915))
9893 * Force all active planes to recompute their states. So that on
9894 * mode_setcrtc after probe, all the intel_plane_state variables
9895 * are already calculated and there is no assert_plane warnings
9898 ret = intel_initial_commit(&i915->drm);
/* Initial-commit failure is logged but not fatal to driver load. */
9900 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
9902 intel_overlay_setup(i915);
9904 ret = intel_fbdev_init(&i915->drm);
9908 /* Only enable hotplug handling once the fbdev is fully set up. */
9909 intel_hpd_init(i915);
9910 intel_hpd_poll_disable(i915);
9912 intel_init_ipc(i915);
/*
 * i830_enable_pipe - force-enable a pipe on i830 with a fixed 640x480@60
 * timing (pipe quirk: some pipes must stay running even when unused).
 */
9917 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9919 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9920 /* 640x480@60Hz, ~25175 kHz */
9921 struct dpll clock = {
/* Sanity check: the hand-picked dividers must yield ~25.175 MHz. */
9931 drm_WARN_ON(&dev_priv->drm,
9932 i9xx_calc_dpll_params(48000, &clock) != 25154);
9934 drm_dbg_kms(&dev_priv->drm,
9935 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
9936 pipe_name(pipe), clock.vco, clock.dot);
9938 fp = i9xx_dpll_compute_fp(&clock);
9939 dpll = DPLL_DVO_2X_MODE |
9941 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
9942 PLL_P2_DIVIDE_BY_4 |
9943 PLL_REF_INPUT_DREFCLK |
9946 intel_de_write(dev_priv, FP0(pipe), fp);
9947 intel_de_write(dev_priv, FP1(pipe), fp);
/* Program the fixed 640x480 CRT timings directly into the transcoder. */
9949 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
9950 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
9951 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
9952 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
9953 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
9954 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
9955 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
9958 * Apparently we need to have VGA mode enabled prior to changing
9959 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
9960 * dividers, even though the register value does change.
9962 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
9963 intel_de_write(dev_priv, DPLL(pipe), dpll);
9965 /* Wait for the clocks to stabilize. */
9966 intel_de_posting_read(dev_priv, DPLL(pipe));
9969 /* The pixel multiplier can only be updated once the
9970 * DPLL is enabled and the clocks are stable.
9972 * So write it again.
9974 intel_de_write(dev_priv, DPLL(pipe), dpll);
9976 /* We do this three times for luck */
9977 for (i = 0; i < 3 ; i++) {
9978 intel_de_write(dev_priv, DPLL(pipe), dpll);
9979 intel_de_posting_read(dev_priv, DPLL(pipe));
9980 udelay(150); /* wait for warmup */
9983 intel_de_write(dev_priv, PIPECONF(pipe),
9984 PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
9985 intel_de_posting_read(dev_priv, PIPECONF(pipe));
/* Confirm the pipe actually started scanning out. */
9987 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * i830_disable_pipe - counterpart to i830_enable_pipe(): turn the
 * force-enabled pipe back off and park its DPLL in VGA mode.
 */
9990 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9992 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9994 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
/* No plane or cursor may still be scanning out of this pipe. */
9997 drm_WARN_ON(&dev_priv->drm,
9998 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
9999 DISPLAY_PLANE_ENABLE);
10000 drm_WARN_ON(&dev_priv->drm,
10001 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
10002 DISPLAY_PLANE_ENABLE);
10003 drm_WARN_ON(&dev_priv->drm,
10004 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
10005 DISPLAY_PLANE_ENABLE);
10006 drm_WARN_ON(&dev_priv->drm,
10007 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
10008 drm_WARN_ON(&dev_priv->drm,
10009 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
10011 intel_de_write(dev_priv, PIPECONF(pipe), 0);
10012 intel_de_posting_read(dev_priv, PIPECONF(pipe));
10014 intel_wait_for_pipe_scanline_stopped(crtc);
/* Leave the DPLL disabled but with VGA mode selected. */
10016 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
10017 intel_de_posting_read(dev_priv, DPLL(pipe));
10021 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
10023 struct intel_crtc *crtc;
10025 if (DISPLAY_VER(dev_priv) >= 4)
10028 for_each_intel_crtc(&dev_priv->drm, crtc) {
10029 struct intel_plane *plane =
10030 to_intel_plane(crtc->base.primary);
10031 struct intel_crtc *plane_crtc;
10034 if (!plane->get_hw_state(plane, &pipe))
10037 if (pipe == crtc->pipe)
10040 drm_dbg_kms(&dev_priv->drm,
10041 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
10042 plane->base.base.id, plane->base.name);
10044 plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
10045 intel_plane_disable_noatomic(plane_crtc, plane);
10049 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
10051 struct drm_device *dev = crtc->base.dev;
10052 struct intel_encoder *encoder;
10054 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
10060 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
10062 struct drm_device *dev = encoder->base.dev;
10063 struct intel_connector *connector;
10065 for_each_connector_on_encoder(dev, &encoder->base, connector)
10071 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
10072 enum pipe pch_transcoder)
10074 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
10075 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
10078 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
10080 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10081 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10082 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10084 if (DISPLAY_VER(dev_priv) >= 9 ||
10085 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
10086 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
10089 if (transcoder_is_dsi(cpu_transcoder))
10092 val = intel_de_read(dev_priv, reg);
10093 val &= ~HSW_FRAME_START_DELAY_MASK;
10094 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10095 intel_de_write(dev_priv, reg, val);
10097 i915_reg_t reg = PIPECONF(cpu_transcoder);
10100 val = intel_de_read(dev_priv, reg);
10101 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
10102 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10103 intel_de_write(dev_priv, reg, val);
10106 if (!crtc_state->has_pch_encoder)
10109 if (HAS_PCH_IBX(dev_priv)) {
10110 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
10113 val = intel_de_read(dev_priv, reg);
10114 val &= ~TRANS_FRAME_START_DELAY_MASK;
10115 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10116 intel_de_write(dev_priv, reg, val);
10118 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
10119 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
10122 val = intel_de_read(dev_priv, reg);
10123 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
10124 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10125 intel_de_write(dev_priv, reg, val);
/*
 * Bring a single CRTC's hardware state into a form the driver can take
 * over from: strip BIOS leftovers from an active pipe, shut down pipes
 * without any consumers, and initialize the FIFO underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/* Disable any background color/etc. set by the BIOS */
		intel_color_commit(crtc_state);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
10191 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
10193 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10196 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
10197 * the hardware when a high res displays plugged in. DPLL P
10198 * divider is zero, and the pipe timings are bonkers. We'll
10199 * try to disable everything in that case.
10201 * FIXME would be nice to be able to sanitize this state
10202 * without several WARNs, but for now let's take the easy
10205 return IS_SANDYBRIDGE(dev_priv) &&
10206 crtc_state->hw.active &&
10207 crtc_state->shared_dpll &&
10208 crtc_state->port_clock == 0;
/*
 * Sanitize one encoder after hw state readout: an encoder that claims
 * active connectors but has no active pipe (typically fallout from resume
 * register restore, or a bogus SNB BIOS DPLL config) is manually disabled
 * and its links are clamped off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		/* treat the pipe as inactive so the encoder gets shut down below */
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the saved pointer once the hooks are done */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
/*
 * Read back each plane's enable state and owning pipe from the hardware,
 * record the visibility in the owning crtc's state, and then rebuild the
 * per-crtc plane bitmasks.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* default in case get_hw_state() doesn't set the pipe */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* attribute visibility to whichever pipe the plane scans out of */
		crtc = intel_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	/* Derive the active/enabled plane bitmasks from the visibility just read */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}
/*
 * Read the current display hardware state (crtcs, planes, encoders, DPLLs,
 * connectors) back into the atomic software state, and derive the initial
 * cdclk/voltage/bandwidth bookkeeping from it. Runs with no hardware
 * changes; sanitizing happens afterwards in intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Throw away any stale software state before reading back */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		struct intel_crtc_state *crtc_state = NULL;

		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = intel_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/*
				 * The encoder should be linked to the
				 * bigjoiner master, never to the slave.
				 * NOTE(review): plain WARN_ON here while the
				 * rest of the file uses drm_WARN_ON — worth
				 * making consistent.
				 */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		if (encoder->sync_state)
			encoder->sync_state(encoder, crtc_state);

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
10502 get_encoder_power_domains(struct drm_i915_private *dev_priv)
10504 struct intel_encoder *encoder;
10506 for_each_intel_encoder(&dev_priv->drm, encoder) {
10507 struct intel_crtc_state *crtc_state;
10509 if (!encoder->get_power_domains)
10513 * MST-primary and inactive encoders don't have a crtc state
10514 * and neither of these require any power domain references.
10516 if (!encoder->base.crtc)
10519 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10520 encoder->get_power_domains(encoder, crtc_state);
/*
 * Apply display workarounds that must be in place before any other display
 * programming (plane disable etc.) happens during takeover.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
10553 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
10554 enum port port, i915_reg_t hdmi_reg)
10556 u32 val = intel_de_read(dev_priv, hdmi_reg);
10558 if (val & SDVO_ENABLE ||
10559 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
10562 drm_dbg_kms(&dev_priv->drm,
10563 "Sanitizing transcoder select for HDMI %c\n",
10566 val &= ~SDVO_PIPE_SEL_MASK;
10567 val |= SDVO_PIPE_SEL(PIPE_A);
10569 intel_de_write(dev_priv, hdmi_reg, val);
10572 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
10573 enum port port, i915_reg_t dp_reg)
10575 u32 val = intel_de_read(dev_priv, dp_reg);
10577 if (val & DP_PORT_EN ||
10578 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
10581 drm_dbg_kms(&dev_priv->drm,
10582 "Sanitizing transcoder select for DP %c\n",
10585 val &= ~DP_PIPE_SEL_MASK;
10586 val |= DP_PIPE_SEL(PIPE_A);
10588 intel_de_write(dev_priv, dp_reg, val);
10591 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
10594 * The BIOS may select transcoder B on some of the PCH
10595 * ports even it doesn't enable the port. This would trip
10596 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
10597 * Sanitize the transcoder select bits to prevent that. We
10598 * assume that the BIOS never actually enabled the port,
10599 * because if it did we'd actually have to toggle the port
10600 * on and back off to make the transcoder A select stick
10601 * (see. intel_dp_link_down(), intel_disable_hdmi(),
10602 * intel_disable_sdvo()).
10604 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
10605 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
10606 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
10608 /* PCH SDVOB multiplex with HDMIB */
10609 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
10610 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
10611 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
/*
 * Scan out the current hw modeset state and sanitize it to a state the
 * driver can take over from. Ordering matters throughout: readout first,
 * then power domains, vblanks, planes, encoders, crtcs, watermarks.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep the display powered across the whole readout/sanitize pass */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and sanitize where supported) the watermark state */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/* No crtc should still hold modeset power domains here */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
/*
 * Restore the display state saved at suspend time (modeset_restore_state),
 * taking all modeset locks with deadlock backoff handling.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* consume the saved state; it is only ever restored once */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* standard w/w locking: back off and retry until no deadlock */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
10731 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
10733 struct intel_connector *connector;
10734 struct drm_connector_list_iter conn_iter;
10736 /* Kill all the work that may have been queued by hpd. */
10737 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
10738 for_each_intel_connector_iter(connector, &conn_iter) {
10739 if (connector->modeset_retry_work.func)
10740 cancel_work_sync(&connector->modeset_retry_work);
10741 if (connector->hdcp.shim) {
10742 cancel_delayed_work_sync(&connector->hdcp.check_work);
10743 cancel_work_sync(&connector->hdcp.prop_work);
10746 drm_connector_list_iter_end(&conn_iter);
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* drain all pending flips and modesets before irqs go away */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* the free list must be empty once the helper work has run */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
/* part #2: call after irq uninstall */
/*
 * Tear down the display stack once interrupts are gone. The call order
 * below is deliberate: poll/hpd first, then MST, then fbdev, and the
 * workqueues only after everything that could queue work is shut down.
 */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup(i915);
}
/* part #3: call after gem init */
/*
 * Final display teardown steps that must wait until after GEM has been
 * cleaned up: firmware, power domains, VGA and VBT resources.
 */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
10817 bool intel_modeset_probe_defer(struct pci_dev *pdev)
10819 struct drm_privacy_screen *privacy_screen;
10822 * apple-gmux is needed on dual GPU MacBook Pro
10823 * to probe the panel if we're the inactive GPU.
10825 if (vga_switcheroo_client_probe_defer(pdev))
10828 /* If the LCD panel has a privacy-screen, wait for it */
10829 privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
10830 if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
10833 drm_privacy_screen_put(privacy_screen);
/*
 * Register the display-facing userspace interfaces (debugfs, opregion,
 * ACPI video, audio, fbdev, connector polling). Runs after output probing.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45. Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
/*
 * Unregister the display-facing userspace interfaces; mirror image of
 * intel_display_driver_register(), torn down in reverse dependency order.
 */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}