2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 #include <linux/vga_switcheroo.h>
37 #include <drm/drm_atomic.h>
38 #include <drm/drm_atomic_helper.h>
39 #include <drm/drm_atomic_uapi.h>
40 #include <drm/drm_damage_helper.h>
41 #include <drm/drm_dp_helper.h>
42 #include <drm/drm_edid.h>
43 #include <drm/drm_fourcc.h>
44 #include <drm/drm_plane_helper.h>
45 #include <drm/drm_privacy_screen_consumer.h>
46 #include <drm/drm_probe_helper.h>
47 #include <drm/drm_rect.h>
49 #include "display/intel_audio.h"
50 #include "display/intel_crt.h"
51 #include "display/intel_ddi.h"
52 #include "display/intel_display_debugfs.h"
53 #include "display/intel_dp.h"
54 #include "display/intel_dp_mst.h"
55 #include "display/intel_dpll.h"
56 #include "display/intel_dpll_mgr.h"
57 #include "display/intel_drrs.h"
58 #include "display/intel_dsi.h"
59 #include "display/intel_dvo.h"
60 #include "display/intel_fb.h"
61 #include "display/intel_gmbus.h"
62 #include "display/intel_hdmi.h"
63 #include "display/intel_lvds.h"
64 #include "display/intel_sdvo.h"
65 #include "display/intel_snps_phy.h"
66 #include "display/intel_tv.h"
67 #include "display/intel_vdsc.h"
68 #include "display/intel_vrr.h"
70 #include "gem/i915_gem_lmem.h"
71 #include "gem/i915_gem_object.h"
73 #include "gt/gen8_ppgtt.h"
79 #include "intel_acpi.h"
80 #include "intel_atomic.h"
81 #include "intel_atomic_plane.h"
83 #include "intel_cdclk.h"
84 #include "intel_color.h"
85 #include "intel_crtc.h"
87 #include "intel_display_types.h"
88 #include "intel_dmc.h"
89 #include "intel_dp_link_training.h"
90 #include "intel_dpt.h"
91 #include "intel_fbc.h"
92 #include "intel_fbdev.h"
93 #include "intel_fdi.h"
94 #include "intel_fifo_underrun.h"
95 #include "intel_frontbuffer.h"
96 #include "intel_hdcp.h"
97 #include "intel_hotplug.h"
98 #include "intel_overlay.h"
99 #include "intel_panel.h"
100 #include "intel_pch_display.h"
101 #include "intel_pch_refclk.h"
102 #include "intel_pcode.h"
103 #include "intel_pipe_crc.h"
104 #include "intel_plane_initial.h"
105 #include "intel_pm.h"
106 #include "intel_pps.h"
107 #include "intel_psr.h"
108 #include "intel_quirks.h"
109 #include "intel_sprite.h"
110 #include "intel_tc.h"
111 #include "intel_vga.h"
112 #include "i9xx_plane.h"
113 #include "skl_scaler.h"
114 #include "skl_universal_plane.h"
115 #include "vlv_dsi_pll.h"
116 #include "vlv_sideband.h"
119 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
120 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
121 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
122 const struct intel_link_m_n *m_n,
123 const struct intel_link_m_n *m2_n2);
124 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
125 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
126 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
127 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
128 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
129 static void intel_modeset_setup_hw_state(struct drm_device *dev,
130 struct drm_modeset_acquire_ctx *ctx);
133 * intel_update_watermarks - update FIFO watermark values based on current modes
134 * @dev_priv: i915 device
136 * Calculate watermark values for the various WM regs based on current mode
137 * and plane configuration.
139 * There are several cases to deal with here:
140 * - normal (i.e. non-self-refresh)
141 * - self-refresh (SR) mode
142 * - lines are large relative to FIFO size (buffer can hold up to 2)
143 * - lines are small relative to FIFO size (buffer can hold more than 2
144 * lines), so need to account for TLB latency
146 * The normal calculation is:
147 * watermark = dotclock * bytes per pixel * latency
148 * where latency is platform & configuration dependent (we assume pessimal
151 * The SR calculation is:
152 * watermark = (trunc(latency/line time)+1) * surface width *
155 * line time = htotal / dotclock
156 * surface width = hdisplay for normal plane and 64 for cursor
157 * and latency is assumed to be high, as above.
159 * The final value programmed to the register should always be rounded up,
160 * and include an extra 2 entries to account for clock crossings.
162 * We don't use the sprite, so we can ignore that. And on Crestline we have
163 * to set the non-SR watermarks to 8.
165 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
167 if (dev_priv->wm_disp->update_wm)
168 dev_priv->wm_disp->update_wm(dev_priv);
171 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
172 struct intel_crtc *crtc)
174 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
175 if (dev_priv->wm_disp->compute_pipe_wm)
176 return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
180 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
181 struct intel_crtc *crtc)
183 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
184 if (!dev_priv->wm_disp->compute_intermediate_wm)
186 if (drm_WARN_ON(&dev_priv->drm,
187 !dev_priv->wm_disp->compute_pipe_wm))
189 return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
192 static bool intel_initial_watermarks(struct intel_atomic_state *state,
193 struct intel_crtc *crtc)
195 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
196 if (dev_priv->wm_disp->initial_watermarks) {
197 dev_priv->wm_disp->initial_watermarks(state, crtc);
203 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
204 struct intel_crtc *crtc)
206 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
207 if (dev_priv->wm_disp->atomic_update_watermarks)
208 dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
211 static void intel_optimize_watermarks(struct intel_atomic_state *state,
212 struct intel_crtc *crtc)
214 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
215 if (dev_priv->wm_disp->optimize_watermarks)
216 dev_priv->wm_disp->optimize_watermarks(state, crtc);
219 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
221 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
222 if (dev_priv->wm_disp->compute_global_watermarks)
223 return dev_priv->wm_disp->compute_global_watermarks(state);
227 /* returns HPLL frequency in kHz */
228 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
230 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
232 /* Obtain SKU information */
233 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
234 CCK_FUSE_HPLL_FREQ_MASK;
236 return vco_freq[hpll_freq] * 1000;
239 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
240 const char *name, u32 reg, int ref_freq)
245 val = vlv_cck_read(dev_priv, reg);
246 divider = val & CCK_FREQUENCY_VALUES;
248 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
249 (divider << CCK_FREQUENCY_STATUS_SHIFT),
250 "%s change in progress\n", name);
252 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
255 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
256 const char *name, u32 reg)
260 vlv_cck_get(dev_priv);
262 if (dev_priv->hpll_freq == 0)
263 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
265 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
267 vlv_cck_put(dev_priv);
272 static void intel_update_czclk(struct drm_i915_private *dev_priv)
274 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
277 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
278 CCK_CZ_CLOCK_CONTROL);
280 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
281 dev_priv->czclk_freq);
284 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
286 return (crtc_state->active_planes &
287 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
290 /* WA Display #0827: Gen9:all */
292 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
295 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
296 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
298 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
299 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
302 /* Wa_2006604312:icl,ehl */
304 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
308 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
309 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
311 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
312 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
315 /* Wa_1604331009:icl,jsl,ehl */
317 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
320 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
321 enable ? CURSOR_GATING_DIS : 0);
325 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
327 return crtc_state->master_transcoder != INVALID_TRANSCODER;
331 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
333 return crtc_state->sync_mode_slaves_mask != 0;
337 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
339 return is_trans_port_sync_master(crtc_state) ||
340 is_trans_port_sync_slave(crtc_state);
343 static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
345 if (crtc_state->bigjoiner_slave)
346 return crtc_state->bigjoiner_linked_crtc;
348 return to_intel_crtc(crtc_state->uapi.crtc);
351 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
354 i915_reg_t reg = PIPEDSL(pipe);
358 if (DISPLAY_VER(dev_priv) == 2)
359 line_mask = DSL_LINEMASK_GEN2;
361 line_mask = DSL_LINEMASK_GEN3;
363 line1 = intel_de_read(dev_priv, reg) & line_mask;
365 line2 = intel_de_read(dev_priv, reg) & line_mask;
367 return line1 != line2;
370 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
372 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
373 enum pipe pipe = crtc->pipe;
375 /* Wait for the display line to settle/start moving */
376 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
377 drm_err(&dev_priv->drm,
378 "pipe %c scanline %s wait timed out\n",
379 pipe_name(pipe), onoff(state));
382 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
384 wait_for_pipe_scanline_moving(crtc, false);
387 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
389 wait_for_pipe_scanline_moving(crtc, true);
393 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
395 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
396 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
398 if (DISPLAY_VER(dev_priv) >= 4) {
399 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
400 i915_reg_t reg = PIPECONF(cpu_transcoder);
402 /* Wait for the Pipe State to go off */
403 if (intel_de_wait_for_clear(dev_priv, reg,
404 I965_PIPECONF_ACTIVE, 100))
405 drm_WARN(&dev_priv->drm, 1,
406 "pipe_off wait timed out\n");
408 intel_wait_for_pipe_scanline_stopped(crtc);
412 void assert_transcoder(struct drm_i915_private *dev_priv,
413 enum transcoder cpu_transcoder, bool state)
416 enum intel_display_power_domain power_domain;
417 intel_wakeref_t wakeref;
419 /* we keep both pipes enabled on 830 */
420 if (IS_I830(dev_priv))
423 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
424 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
426 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
427 cur_state = !!(val & PIPECONF_ENABLE);
429 intel_display_power_put(dev_priv, power_domain, wakeref);
434 I915_STATE_WARN(cur_state != state,
435 "transcoder %s assertion failure (expected %s, current %s)\n",
436 transcoder_name(cpu_transcoder),
437 onoff(state), onoff(cur_state));
440 static void assert_plane(struct intel_plane *plane, bool state)
445 cur_state = plane->get_hw_state(plane, &pipe);
447 I915_STATE_WARN(cur_state != state,
448 "%s assertion failure (expected %s, current %s)\n",
449 plane->base.name, onoff(state), onoff(cur_state));
452 #define assert_plane_enabled(p) assert_plane(p, true)
453 #define assert_plane_disabled(p) assert_plane(p, false)
455 static void assert_planes_disabled(struct intel_crtc *crtc)
457 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
458 struct intel_plane *plane;
460 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
461 assert_plane_disabled(plane);
464 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
465 struct intel_digital_port *dig_port,
466 unsigned int expected_mask)
471 switch (dig_port->base.port) {
473 port_mask = DPLL_PORTB_READY_MASK;
477 port_mask = DPLL_PORTC_READY_MASK;
482 port_mask = DPLL_PORTD_READY_MASK;
483 dpll_reg = DPIO_PHY_STATUS;
489 if (intel_de_wait_for_register(dev_priv, dpll_reg,
490 port_mask, expected_mask, 1000))
491 drm_WARN(&dev_priv->drm, 1,
492 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
493 dig_port->base.base.base.id, dig_port->base.base.name,
494 intel_de_read(dev_priv, dpll_reg) & port_mask,
498 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
500 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
502 if (HAS_PCH_LPT(dev_priv))
508 void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
510 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
511 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
512 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
513 enum pipe pipe = crtc->pipe;
517 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
519 assert_planes_disabled(crtc);
522 * A pipe without a PLL won't actually be able to drive bits from
523 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
526 if (HAS_GMCH(dev_priv)) {
527 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
528 assert_dsi_pll_enabled(dev_priv);
530 assert_pll_enabled(dev_priv, pipe);
532 if (new_crtc_state->has_pch_encoder) {
533 /* if driving the PCH, we need FDI enabled */
534 assert_fdi_rx_pll_enabled(dev_priv,
535 intel_crtc_pch_transcoder(crtc));
536 assert_fdi_tx_pll_enabled(dev_priv,
537 (enum pipe) cpu_transcoder);
539 /* FIXME: assert CPU port conditions for SNB+ */
542 /* Wa_22012358565:adl-p */
543 if (DISPLAY_VER(dev_priv) == 13)
544 intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
545 0, PIPE_ARB_USE_PROG_SLOTS);
547 reg = PIPECONF(cpu_transcoder);
548 val = intel_de_read(dev_priv, reg);
549 if (val & PIPECONF_ENABLE) {
550 /* we keep both pipes enabled on 830 */
551 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
555 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
556 intel_de_posting_read(dev_priv, reg);
559 * Until the pipe starts PIPEDSL reads will return a stale value,
560 * which causes an apparent vblank timestamp jump when PIPEDSL
561 * resets to its proper value. That also messes up the frame count
562 * when it's derived from the timestamps. So let's wait for the
563 * pipe to start properly before we call drm_crtc_vblank_on()
565 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
566 intel_wait_for_pipe_scanline_moving(crtc);
569 void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
571 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
572 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
573 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
574 enum pipe pipe = crtc->pipe;
578 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
581 * Make sure planes won't keep trying to pump pixels to us,
582 * or we might hang the display.
584 assert_planes_disabled(crtc);
586 reg = PIPECONF(cpu_transcoder);
587 val = intel_de_read(dev_priv, reg);
588 if ((val & PIPECONF_ENABLE) == 0)
592 * Double wide has implications for planes
593 * so best keep it disabled when not needed.
595 if (old_crtc_state->double_wide)
596 val &= ~PIPECONF_DOUBLE_WIDE;
598 /* Don't disable pipe or pipe PLLs if needed */
599 if (!IS_I830(dev_priv))
600 val &= ~PIPECONF_ENABLE;
602 if (DISPLAY_VER(dev_priv) >= 12)
603 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
604 FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
606 intel_de_write(dev_priv, reg, val);
607 if ((val & PIPECONF_ENABLE) == 0)
608 intel_wait_for_pipe_off(old_crtc_state);
611 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
613 unsigned int size = 0;
616 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
617 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
622 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
624 unsigned int size = 0;
627 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
628 unsigned int plane_size;
630 if (rem_info->plane[i].linear)
631 plane_size = rem_info->plane[i].size;
633 plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
638 if (rem_info->plane_alignment)
639 size = ALIGN(size, rem_info->plane_alignment);
647 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
649 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
650 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
652 return DISPLAY_VER(dev_priv) < 4 ||
654 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
658 * Convert the x/y offsets into a linear offset.
659 * Only valid with 0/180 degree rotation, which is fine since linear
660 * offset is only used with linear buffers on pre-hsw and tiled buffers
661 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
663 u32 intel_fb_xy_to_linear(int x, int y,
664 const struct intel_plane_state *state,
667 const struct drm_framebuffer *fb = state->hw.fb;
668 unsigned int cpp = fb->format->cpp[color_plane];
669 unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
671 return y * pitch + x * cpp;
675 * Add the x/y offsets derived from fb->offsets[] to the user
676 * specified plane src x/y offsets. The resulting x/y offsets
677 * specify the start of scanout from the beginning of the gtt mapping.
679 void intel_add_fb_offsets(int *x, int *y,
680 const struct intel_plane_state *state,
684 *x += state->view.color_plane[color_plane].x;
685 *y += state->view.color_plane[color_plane].y;
688 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
689 u32 pixel_format, u64 modifier)
691 struct intel_crtc *crtc;
692 struct intel_plane *plane;
694 if (!HAS_DISPLAY(dev_priv))
698 * We assume the primary plane for pipe A has
699 * the highest stride limits of them all,
700 * if in case pipe A is disabled, use the first pipe from pipe_mask.
702 crtc = intel_first_crtc(dev_priv);
706 plane = to_intel_plane(crtc->base.primary);
708 return plane->max_stride(plane, pixel_format, modifier,
713 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
714 struct intel_plane_state *plane_state,
717 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
719 plane_state->uapi.visible = visible;
722 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
724 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
727 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
729 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
730 struct drm_plane *plane;
733 * Active_planes aliases if multiple "primary" or cursor planes
734 * have been used on the same (or wrong) pipe. plane_mask uses
735 * unique ids, hence we can use that to reconstruct active_planes.
737 crtc_state->enabled_planes = 0;
738 crtc_state->active_planes = 0;
740 drm_for_each_plane_mask(plane, &dev_priv->drm,
741 crtc_state->uapi.plane_mask) {
742 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
743 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
747 void intel_plane_disable_noatomic(struct intel_crtc *crtc,
748 struct intel_plane *plane)
750 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
751 struct intel_crtc_state *crtc_state =
752 to_intel_crtc_state(crtc->base.state);
753 struct intel_plane_state *plane_state =
754 to_intel_plane_state(plane->base.state);
756 drm_dbg_kms(&dev_priv->drm,
757 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
758 plane->base.base.id, plane->base.name,
759 crtc->base.base.id, crtc->base.name);
761 intel_set_plane_visible(crtc_state, plane_state, false);
762 fixup_plane_bitmasks(crtc_state);
763 crtc_state->data_rate[plane->id] = 0;
764 crtc_state->min_cdclk[plane->id] = 0;
766 if (plane->id == PLANE_PRIMARY)
767 hsw_disable_ips(crtc_state);
770 * Vblank time updates from the shadow to live plane control register
771 * are blocked if the memory self-refresh mode is active at that
772 * moment. So to make sure the plane gets truly disabled, disable
773 * first the self-refresh mode. The self-refresh enable bit in turn
774 * will be checked/applied by the HW only at the next frame start
775 * event which is after the vblank start event, so we need to have a
776 * wait-for-vblank between disabling the plane and the pipe.
778 if (HAS_GMCH(dev_priv) &&
779 intel_set_memory_cxsr(dev_priv, false))
780 intel_crtc_wait_for_next_vblank(crtc);
783 * Gen2 reports pipe underruns whenever all planes are disabled.
784 * So disable underrun reporting before all the planes get disabled.
786 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
787 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
789 intel_plane_disable_arm(plane, crtc_state);
790 intel_crtc_wait_for_next_vblank(crtc);
794 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
798 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
799 plane_state->view.color_plane[0].offset, 0);
805 __intel_display_resume(struct drm_device *dev,
806 struct drm_atomic_state *state,
807 struct drm_modeset_acquire_ctx *ctx)
809 struct drm_crtc_state *crtc_state;
810 struct drm_crtc *crtc;
813 intel_modeset_setup_hw_state(dev, ctx);
814 intel_vga_redisable(to_i915(dev));
820 * We've duplicated the state, pointers to the old state are invalid.
822 * Don't attempt to use the old state until we commit the duplicated state.
824 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
826 * Force recalculation even if we restore
827 * current state. With fast modeset this may not result
828 * in a modeset when the state is compatible.
830 crtc_state->mode_changed = true;
833 /* ignore any reset values/BIOS leftovers in the WM registers */
834 if (!HAS_GMCH(to_i915(dev)))
835 to_intel_atomic_state(state)->skip_intermediate_wm = true;
837 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
839 drm_WARN_ON(dev, ret == -EDEADLK);
843 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
845 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
846 intel_has_gpu_reset(&dev_priv->gt));
849 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
851 struct drm_device *dev = &dev_priv->drm;
852 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
853 struct drm_atomic_state *state;
856 if (!HAS_DISPLAY(dev_priv))
859 /* reset doesn't touch the display */
860 if (!dev_priv->params.force_reset_modeset_test &&
861 !gpu_reset_clobbers_display(dev_priv))
864 /* We have a modeset vs reset deadlock, defensively unbreak it. */
865 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
866 smp_mb__after_atomic();
867 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
869 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
870 drm_dbg_kms(&dev_priv->drm,
871 "Modeset potentially stuck, unbreaking through wedging\n");
872 intel_gt_set_wedged(&dev_priv->gt);
876 * Need mode_config.mutex so that we don't
877 * trample ongoing ->detect() and whatnot.
879 mutex_lock(&dev->mode_config.mutex);
880 drm_modeset_acquire_init(ctx, 0);
882 ret = drm_modeset_lock_all_ctx(dev, ctx);
886 drm_modeset_backoff(ctx);
889 * Disabling the crtcs gracefully seems nicer. Also the
890 * g33 docs say we should at least disable all the planes.
892 state = drm_atomic_helper_duplicate_state(dev, ctx);
894 ret = PTR_ERR(state);
895 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
900 ret = drm_atomic_helper_disable_all(dev, ctx);
902 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
904 drm_atomic_state_put(state);
908 dev_priv->modeset_restore_state = state;
909 state->acquire_ctx = ctx;
912 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
914 struct drm_device *dev = &dev_priv->drm;
915 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
916 struct drm_atomic_state *state;
919 if (!HAS_DISPLAY(dev_priv))
922 /* reset doesn't touch the display */
923 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
926 state = fetch_and_zero(&dev_priv->modeset_restore_state);
930 /* reset doesn't touch the display */
931 if (!gpu_reset_clobbers_display(dev_priv)) {
932 /* for testing only restore the display */
933 ret = __intel_display_resume(dev, state, ctx);
935 drm_err(&dev_priv->drm,
936 "Restoring old state failed with %i\n", ret);
939 * The display has been reset as well,
940 * so need a full re-initialization.
942 intel_pps_unlock_regs_wa(dev_priv);
943 intel_modeset_init_hw(dev_priv);
944 intel_init_clock_gating(dev_priv);
945 intel_hpd_init(dev_priv);
947 ret = __intel_display_resume(dev, state, ctx);
949 drm_err(&dev_priv->drm,
950 "Restoring old state failed with %i\n", ret);
952 intel_hpd_poll_disable(dev_priv);
955 drm_atomic_state_put(state);
957 drm_modeset_drop_locks(ctx);
958 drm_modeset_acquire_fini(ctx);
959 mutex_unlock(&dev->mode_config.mutex);
961 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
964 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
966 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
967 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
968 enum pipe pipe = crtc->pipe;
971 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
974 * Display WA #1153: icl
975 * enable hardware to bypass the alpha math
976 * and rounding for per-pixel values 00 and 0xff
978 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
980 * Display WA # 1605353570: icl
981 * Set the pixel rounding bit to 1 for allowing
982 * passthrough of Frame buffer pixels unmodified
985 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
988 * Underrun recovery must always be disabled on display 13+.
989 * DG2 chicken bit meaning is inverted compared to other platforms.
991 if (IS_DG2(dev_priv))
992 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
993 else if (DISPLAY_VER(dev_priv) >= 13)
994 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
996 /* Wa_14010547955:dg2 */
997 if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
998 tmp |= DG2_RENDER_CCSTAG_4_3_EN;
1000 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1003 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1005 struct drm_crtc *crtc;
1008 drm_for_each_crtc(crtc, &dev_priv->drm) {
1009 struct drm_crtc_commit *commit;
1010 spin_lock(&crtc->commit_lock);
1011 commit = list_first_entry_or_null(&crtc->commit_list,
1012 struct drm_crtc_commit, commit_entry);
1013 cleanup_done = commit ?
1014 try_wait_for_completion(&commit->cleanup_done) : true;
1015 spin_unlock(&crtc->commit_lock);
1020 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
1029 * Finds the encoder associated with the given CRTC. This can only be
1030 * used when we know that the CRTC isn't feeding multiple encoders!
1032 struct intel_encoder *
1033 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1034 const struct intel_crtc_state *crtc_state)
1036 const struct drm_connector_state *connector_state;
1037 const struct drm_connector *connector;
1038 struct intel_encoder *encoder = NULL;
1039 struct intel_crtc *master_crtc;
1040 int num_encoders = 0;
1043 master_crtc = intel_master_crtc(crtc_state);
1045 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1046 if (connector_state->crtc != &master_crtc->base)
1049 encoder = to_intel_encoder(connector_state->best_encoder);
1053 drm_WARN(encoder->base.dev, num_encoders != 1,
1054 "%d encoders for pipe %c\n",
1055 num_encoders, pipe_name(master_crtc->pipe));
1060 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
1063 i915_reg_t dslreg = PIPEDSL(pipe);
1066 temp = intel_de_read(dev_priv, dslreg);
1068 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
1069 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
1070 drm_err(&dev_priv->drm,
1071 "mode set failed: pipe %c stuck\n",
1076 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
1078 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1079 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1080 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
1081 enum pipe pipe = crtc->pipe;
1082 int width = drm_rect_width(dst);
1083 int height = drm_rect_height(dst);
1087 if (!crtc_state->pch_pfit.enabled)
1090 /* Force use of hard-coded filter coefficients
1091 * as some pre-programmed values are broken,
1094 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
1095 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1096 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
1098 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1100 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
1101 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
/*
 * hsw_enable_ips - enable IPS power-saving for a crtc (HSW/BDW).
 * No-op unless crtc_state->ips_enabled. On BDW enabling goes through the
 * pcode mailbox (and the resulting IPS_CTL readback is unreliable); on HSW
 * IPS_CTL is written directly and we wait for the bit to latch at vblank.
 * Must be called only after at least one non-cursor plane is enabled.
 */
1104 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
1106 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1107 struct drm_device *dev = crtc->base.dev;
1108 struct drm_i915_private *dev_priv = to_i915(dev);
1110 if (!crtc_state->ips_enabled)
1114 * We can only enable IPS after we enable a plane and wait for a vblank
1115 * This function is called from post_plane_update, which is run after
/* Sanity check: IPS requires an active plane other than the cursor. */
1118 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
1120 if (IS_BROADWELL(dev_priv)) {
1121 drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
1122 IPS_ENABLE | IPS_PCODE_CONTROL));
1123 /* Quoting Art Runyan: "its not safe to expect any particular
1124 * value in IPS_CTL bit 31 after enabling IPS through the
1125 * mailbox." Moreover, the mailbox may return a bogus state,
1126 * so we need to just enable it and continue on.
1129 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
1130 /* The bit only becomes 1 in the next vblank, so this wait here
1131 * is essentially intel_wait_for_vblank. If we don't have this
1132 * and don't wait for vblanks until the end of crtc_enable, then
1133 * the HW state readout code will complain that the expected
1134 * IPS_CTL value is not the one we read. */
1135 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
1136 drm_err(&dev_priv->drm,
1137 "Timed out waiting for IPS enable\n")
/*
 * hsw_disable_ips - disable IPS for a crtc (HSW/BDW) and wait for it to
 * actually turn off. No-op unless IPS was enabled in the old state.
 * BDW goes through the pcode mailbox; HSW clears IPS_CTL directly. A
 * vblank wait follows before any plane can be safely disabled.
 */
1141 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
1143 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1144 struct drm_device *dev = crtc->base.dev;
1145 struct drm_i915_private *dev_priv = to_i915(dev);
1147 if (!crtc_state->ips_enabled)
1150 if (IS_BROADWELL(dev_priv)) {
1152 snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
1154 * Wait for PCODE to finish disabling IPS. The BSpec specified
1155 * 42ms timeout value leads to occasional timeouts so use 100ms
1158 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
1159 drm_err(&dev_priv->drm,
1160 "Timed out waiting for IPS disable\n");
1162 intel_de_write(dev_priv, IPS_CTL, 0);
1163 intel_de_posting_read(dev_priv, IPS_CTL);
1166 /* We need to wait for a vblank before we can disable the plane. */
1167 intel_crtc_wait_for_next_vblank(crtc);
/*
 * intel_crtc_dpms_overlay_disable - switch off the legacy video overlay
 * attached to this crtc (if any); userspace must re-enable it itself.
 */
1170 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
1173 (void) intel_overlay_switch_off(crtc->overlay);
1175 /* Let userspace switch the overlay on again. In most cases userspace
1176 * has to recompute where to put it anyway.
/*
 * hsw_pre_update_disable_ips - decide whether IPS must be disabled before
 * the plane update. Returns true when IPS was on and either a full
 * modeset is happening, the HSW split-gamma LUT workaround applies, or
 * the new state simply no longer wants IPS.
 */
1180 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
1181 const struct intel_crtc_state *new_crtc_state)
1183 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1184 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1186 if (!old_crtc_state->ips_enabled)
1189 if (intel_crtc_needs_modeset(new_crtc_state))
1193 * Workaround : Do not read or write the pipe palette/gamma data while
1194 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1196 * Disable IPS before we program the LUT.
1198 if (IS_HASWELL(dev_priv) &&
1199 (new_crtc_state->uapi.color_mgmt_changed ||
1200 new_crtc_state->update_pipe) &&
1201 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1204 return !new_crtc_state->ips_enabled;
/*
 * hsw_post_update_enable_ips - decide whether IPS must be (re-)enabled
 * after the plane update. Mirrors hsw_pre_update_disable_ips(): returns
 * true when the new state wants IPS and it is either freshly enabled, was
 * forced off for the split-gamma LUT workaround, or we are on the first
 * fastset after takeover (BDW IPS state cannot be read out, so assume the
 * worst and force it on).
 */
1207 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
1208 const struct intel_crtc_state *new_crtc_state)
1210 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1211 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1213 if (!new_crtc_state->ips_enabled)
1216 if (intel_crtc_needs_modeset(new_crtc_state))
1220 * Workaround : Do not read or write the pipe palette/gamma data while
1221 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1223 * Re-enable IPS after the LUT has been programmed.
1225 if (IS_HASWELL(dev_priv) &&
1226 (new_crtc_state->uapi.color_mgmt_changed ||
1227 new_crtc_state->update_pipe) &&
1228 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1232 * We can't read out IPS on broadwell, assume the worst and
1233 * forcibly enable IPS on the first fastset.
1235 if (new_crtc_state->update_pipe && old_crtc_state->inherited)
1238 return !old_crtc_state->ips_enabled;
/*
 * needs_nv12_wa - true when Display WA #0827 applies: Gen9 display with
 * at least one NV12 plane active on this crtc.
 */
1241 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1243 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1245 if (!crtc_state->nv12_planes)
1248 /* WA Display #0827: Gen9:all */
1249 if (DISPLAY_VER(dev_priv) == 9)
/*
 * needs_scalerclk_wa - true when Wa_2006604312 (scaler clock gating
 * workaround, icl/ehl = display ver 11) applies: any scaler in use.
 */
1255 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1257 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1259 /* Wa_2006604312:icl,ehl */
1260 if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
/*
 * needs_cursorclk_wa - true when Wa_1604331009 (cursor clock gating
 * workaround, display ver 11) applies: HDR mode with an active cursor.
 */
1266 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1268 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1270 /* Wa_1604331009:icl,jsl,ehl */
1271 if (is_hdr_mode(crtc_state) &&
1272 crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1273 DISPLAY_VER(dev_priv) == 11)
/*
 * intel_async_flip_vtd_wa - apply/remove the VT-d async-flip workaround:
 * clamp the plane stretch max to x1 while async flips are enabled (and
 * restore x8 when disabled). Gen9 uses the SKL fields; HSW/BDW the HSW
 * fields of CHICKEN_PIPESL_1.
 */
1279 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
1280 enum pipe pipe, bool enable)
1282 if (DISPLAY_VER(i915) == 9) {
1284 * "Plane N strech max must be programmed to 11b (x1)
1285 * when Async flips are enabled on that plane."
1287 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1288 SKL_PLANE1_STRETCH_MAX_MASK,
1289 enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1291 /* Also needed on HSW/BDW albeit undocumented */
1292 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1293 HSW_PRI_STRETCH_MAX_MASK,
1294 enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
/*
 * needs_async_flip_vtd_wa - true when the stretch-max workaround above is
 * required: async flip requested, VT-d active, and HSW/BDW/Gen9 display.
 */
1298 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1300 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1302 return crtc_state->uapi.async_flip && intel_vtd_active() &&
1303 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
/*
 * planes_enabling - true when this commit takes the crtc from "no planes
 * active" (or a full modeset) to having active planes.
 */
1306 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1307 const struct intel_crtc_state *new_crtc_state)
1309 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1310 new_crtc_state->active_planes;
/*
 * planes_disabling - true when this commit takes the crtc from having
 * active planes to none (or through a full modeset).
 */
1313 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1314 const struct intel_crtc_state *new_crtc_state)
1316 return old_crtc_state->active_planes &&
1317 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
/*
 * intel_post_plane_update - per-crtc cleanup after the plane update has
 * been committed: frontbuffer flip notification, post-update watermarks,
 * IPS re-enable, FBC/DRRS bookkeeping, and tearing down the various
 * workarounds (VT-d async flip, WA #0827, Wa_2006604312, Wa_1604331009)
 * that are no longer needed in the new state.
 */
1320 static void intel_post_plane_update(struct intel_atomic_state *state,
1321 struct intel_crtc *crtc)
1323 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1324 const struct intel_crtc_state *old_crtc_state =
1325 intel_atomic_get_old_crtc_state(state, crtc);
1326 const struct intel_crtc_state *new_crtc_state =
1327 intel_atomic_get_new_crtc_state(state, crtc);
1328 enum pipe pipe = crtc->pipe;
1330 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
1332 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
1333 intel_update_watermarks(dev_priv);
1335 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
1336 hsw_enable_ips(new_crtc_state);
1338 intel_fbc_post_update(state, crtc);
1339 intel_drrs_page_flip(state, crtc);
/* Each workaround is removed only on the needed -> not-needed edge. */
1341 if (needs_async_flip_vtd_wa(old_crtc_state) &&
1342 !needs_async_flip_vtd_wa(new_crtc_state))
1343 intel_async_flip_vtd_wa(dev_priv, pipe, false);
1345 if (needs_nv12_wa(old_crtc_state) &&
1346 !needs_nv12_wa(new_crtc_state))
1347 skl_wa_827(dev_priv, pipe, false);
1349 if (needs_scalerclk_wa(old_crtc_state) &&
1350 !needs_scalerclk_wa(new_crtc_state))
1351 icl_wa_scalerclkgating(dev_priv, pipe, false);
1353 if (needs_cursorclk_wa(old_crtc_state) &&
1354 !needs_cursorclk_wa(new_crtc_state))
1355 icl_wa_cursorclkgating(dev_priv, pipe, false);
/*
 * intel_crtc_enable_flip_done - arm the flip-done interrupt on every
 * plane of this crtc that is being updated with an async flip and
 * implements enable_flip_done().
 */
1359 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1360 struct intel_crtc *crtc)
1362 const struct intel_crtc_state *crtc_state =
1363 intel_atomic_get_new_crtc_state(state, crtc);
1364 u8 update_planes = crtc_state->update_planes;
1365 const struct intel_plane_state *plane_state;
1366 struct intel_plane *plane;
1369 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1370 if (plane->enable_flip_done &&
1371 plane->pipe == crtc->pipe &&
1372 update_planes & BIT(plane->id) &&
1373 plane_state->do_async_flip)
1374 plane->enable_flip_done(plane);
/*
 * intel_crtc_disable_flip_done - counterpart of
 * intel_crtc_enable_flip_done(): disarm the flip-done interrupt on the
 * same set of async-flipping planes.
 */
1378 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1379 struct intel_crtc *crtc)
1381 const struct intel_crtc_state *crtc_state =
1382 intel_atomic_get_new_crtc_state(state, crtc);
1383 u8 update_planes = crtc_state->update_planes;
1384 const struct intel_plane_state *plane_state;
1385 struct intel_plane *plane;
1388 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1389 if (plane->disable_flip_done &&
1390 plane->pipe == crtc->pipe &&
1391 update_planes & BIT(plane->id) &&
1392 plane_state->do_async_flip)
1393 plane->disable_flip_done(plane);
/*
 * intel_crtc_async_flip_disable_wa - workaround for planes whose async
 * address update enable bit is double buffered: re-arm each affected
 * plane with its OLD state but async flip turned off, then wait a vblank
 * so the cleared bit actually latches before the real update.
 */
1397 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1398 struct intel_crtc *crtc)
1400 const struct intel_crtc_state *old_crtc_state =
1401 intel_atomic_get_old_crtc_state(state, crtc);
1402 const struct intel_crtc_state *new_crtc_state =
1403 intel_atomic_get_new_crtc_state(state, crtc);
1404 u8 update_planes = new_crtc_state->update_planes;
1405 const struct intel_plane_state *old_plane_state;
1406 struct intel_plane *plane;
1407 bool need_vbl_wait = false;
1410 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1411 if (plane->need_async_flip_disable_wa &&
1412 plane->pipe == crtc->pipe &&
1413 update_planes & BIT(plane->id)) {
1415 * Apart from the async flip bit we want to
1416 * preserve the old state for the plane.
1418 plane->async_flip(plane, old_crtc_state,
1419 old_plane_state, false);
1420 need_vbl_wait = true;
/* Only wait if at least one plane actually needed the workaround. */
1425 intel_crtc_wait_for_next_vblank(crtc);
/*
 * intel_pre_plane_update - per-crtc preparation before the plane update
 * is committed: PSR/IPS/FBC pre-update handling, arming hardware
 * workarounds on their not-needed -> needed edge, CxSR / LP watermark
 * vblank ordering, intermediate watermark programming for non-modeset
 * commits, gen2 underrun suppression, and the double-buffered async-flip
 * disable workaround. Ordering of these steps is significant.
 */
1428 static void intel_pre_plane_update(struct intel_atomic_state *state,
1429 struct intel_crtc *crtc)
1431 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1432 const struct intel_crtc_state *old_crtc_state =
1433 intel_atomic_get_old_crtc_state(state, crtc);
1434 const struct intel_crtc_state *new_crtc_state =
1435 intel_atomic_get_new_crtc_state(state, crtc);
1436 enum pipe pipe = crtc->pipe;
1438 intel_psr_pre_plane_update(state, crtc);
1440 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
1441 hsw_disable_ips(old_crtc_state);
1443 if (intel_fbc_pre_update(state, crtc))
1444 intel_crtc_wait_for_next_vblank(crtc);
/* Workarounds are armed on the not-needed -> needed edge (the post
 * hook removes them on the opposite edge). */
1446 if (!needs_async_flip_vtd_wa(old_crtc_state) &&
1447 needs_async_flip_vtd_wa(new_crtc_state))
1448 intel_async_flip_vtd_wa(dev_priv, pipe, true);
1450 /* Display WA 827 */
1451 if (!needs_nv12_wa(old_crtc_state) &&
1452 needs_nv12_wa(new_crtc_state))
1453 skl_wa_827(dev_priv, pipe, true);
1455 /* Wa_2006604312:icl,ehl */
1456 if (!needs_scalerclk_wa(old_crtc_state) &&
1457 needs_scalerclk_wa(new_crtc_state))
1458 icl_wa_scalerclkgating(dev_priv, pipe, true);
1460 /* Wa_1604331009:icl,jsl,ehl */
1461 if (!needs_cursorclk_wa(old_crtc_state) &&
1462 needs_cursorclk_wa(new_crtc_state))
1463 icl_wa_cursorclkgating(dev_priv, pipe, true);
1466 * Vblank time updates from the shadow to live plane control register
1467 * are blocked if the memory self-refresh mode is active at that
1468 * moment. So to make sure the plane gets truly disabled, disable
1469 * first the self-refresh mode. The self-refresh enable bit in turn
1470 * will be checked/applied by the HW only at the next frame start
1471 * event which is after the vblank start event, so we need to have a
1472 * wait-for-vblank between disabling the plane and the pipe.
1474 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
1475 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
1476 intel_crtc_wait_for_next_vblank(crtc);
1479 * IVB workaround: must disable low power watermarks for at least
1480 * one frame before enabling scaling. LP watermarks can be re-enabled
1481 * when scaling is disabled.
1483 * WaCxSRDisabledForSpriteScaling:ivb
1485 if (old_crtc_state->hw.active &&
1486 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
1487 intel_crtc_wait_for_next_vblank(crtc);
1490 * If we're doing a modeset we don't need to do any
1491 * pre-vblank watermark programming here.
1493 if (!intel_crtc_needs_modeset(new_crtc_state)) {
1495 * For platforms that support atomic watermarks, program the
1496 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
1497 * will be the intermediate values that are safe for both pre- and
1498 * post- vblank; when vblank happens, the 'active' values will be set
1499 * to the final 'target' values and we'll do this again to get the
1500 * optimal watermarks. For gen9+ platforms, the values we program here
1501 * will be the final target values which will get automatically latched
1502 * at vblank time; no further programming will be necessary.
1504 * If a platform hasn't been transitioned to atomic watermarks yet,
1505 * we'll continue to update watermarks the old way, if flags tell
1508 if (!intel_initial_watermarks(state, crtc))
1509 if (new_crtc_state->update_wm_pre)
1510 intel_update_watermarks(dev_priv);
1514 * Gen2 reports pipe underruns whenever all planes are disabled.
1515 * So disable underrun reporting before all the planes get disabled.
1517 * We do this after .initial_watermarks() so that we have a
1518 * chance of catching underruns with the intermediate watermarks
1519 * vs. the old plane configuration.
1521 if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
1522 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1525 * WA for platforms where async address update enable bit
1526 * is double buffered and only latched at start of vblank.
1528 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
1529 intel_crtc_async_flip_disable_wa(state, crtc);
/*
 * intel_crtc_disable_planes - disable every plane of this crtc selected
 * by update_planes (plus the legacy overlay), then issue one combined
 * frontbuffer flip for the planes that were actually visible.
 */
1532 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1533 struct intel_crtc *crtc)
1535 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1536 const struct intel_crtc_state *new_crtc_state =
1537 intel_atomic_get_new_crtc_state(state, crtc);
1538 unsigned int update_mask = new_crtc_state->update_planes;
1539 const struct intel_plane_state *old_plane_state;
1540 struct intel_plane *plane;
1541 unsigned fb_bits = 0;
1544 intel_crtc_dpms_overlay_disable(crtc);
1546 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1547 if (crtc->pipe != plane->pipe ||
1548 !(update_mask & BIT(plane->id)))
1551 intel_plane_disable_arm(plane, new_crtc_state);
1553 if (old_plane_state->uapi.visible)
1554 fb_bits |= plane->frontbuffer_bit;
1557 intel_frontbuffer_flip(dev_priv, fb_bits);
1561 * intel_connector_primary_encoder - get the primary encoder for a connector
1562 * @connector: connector for which to return the encoder
1564 * Returns the primary encoder for a connector. There is a 1:1 mapping from
1565 * all connectors to their encoder, except for DP-MST connectors which have
1566 * both a virtual and a primary encoder. These DP-MST primary encoders can be
1567 * pointed to by as many DP-MST connectors as there are pipes.
1569 static struct intel_encoder *
1570 intel_connector_primary_encoder(struct intel_connector *connector)
1572 struct intel_encoder *encoder;
/* DP-MST: the primary encoder is the digital port's base encoder. */
1574 if (connector->mst_port)
1575 return &dp_to_dig_port(connector->mst_port)->base;
1577 encoder = intel_attached_encoder(connector);
1578 drm_WARN_ON(connector->base.dev, !encoder);
/*
 * intel_encoders_update_prepare - pre-commit encoder hook dispatch.
 * First refreshes the DPLL state in non-modeset (fastset) crtc states
 * from the old state (needed for TypeC ports after non-blocking commits),
 * then, for modesets only, calls each affected connector's primary
 * encoder ->update_prepare() hook.
 */
1583 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1585 struct drm_i915_private *i915 = to_i915(state->base.dev);
1586 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1587 struct intel_crtc *crtc;
1588 struct drm_connector_state *new_conn_state;
1589 struct drm_connector *connector;
1593 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1594 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1596 if (i915->dpll.mgr) {
1597 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1598 if (intel_crtc_needs_modeset(new_crtc_state))
1601 new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1602 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1606 if (!state->modeset)
1609 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1611 struct intel_connector *intel_connector;
1612 struct intel_encoder *encoder;
1613 struct intel_crtc *crtc;
1615 if (!intel_connector_needs_modeset(state, connector))
1618 intel_connector = to_intel_connector(connector);
1619 encoder = intel_connector_primary_encoder(intel_connector);
1620 if (!encoder->update_prepare)
1623 crtc = new_conn_state->crtc ?
1624 to_intel_crtc(new_conn_state->crtc) : NULL;
1625 encoder->update_prepare(state, encoder, crtc);
/*
 * intel_encoders_update_complete - post-commit counterpart of
 * intel_encoders_update_prepare(): for modesets only, calls each affected
 * connector's primary encoder ->update_complete() hook.
 */
1629 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1631 struct drm_connector_state *new_conn_state;
1632 struct drm_connector *connector;
1635 if (!state->modeset)
1638 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1640 struct intel_connector *intel_connector;
1641 struct intel_encoder *encoder;
1642 struct intel_crtc *crtc;
1644 if (!intel_connector_needs_modeset(state, connector))
1647 intel_connector = to_intel_connector(connector);
1648 encoder = intel_connector_primary_encoder(intel_connector);
1649 if (!encoder->update_complete)
1652 crtc = new_conn_state->crtc ?
1653 to_intel_crtc(new_conn_state->crtc) : NULL;
1654 encoder->update_complete(state, encoder, crtc);
/*
 * intel_encoders_pre_pll_enable - call ->pre_pll_enable() on every
 * encoder whose connector (new state) is bound to this crtc.
 */
1658 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1659 struct intel_crtc *crtc)
1661 const struct intel_crtc_state *crtc_state =
1662 intel_atomic_get_new_crtc_state(state, crtc);
1663 const struct drm_connector_state *conn_state;
1664 struct drm_connector *conn;
1667 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1668 struct intel_encoder *encoder =
1669 to_intel_encoder(conn_state->best_encoder);
1671 if (conn_state->crtc != &crtc->base)
1674 if (encoder->pre_pll_enable)
1675 encoder->pre_pll_enable(state, encoder,
1676 crtc_state, conn_state);
/*
 * intel_encoders_pre_enable - call ->pre_enable() on every encoder whose
 * connector (new state) is bound to this crtc.
 */
1680 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1681 struct intel_crtc *crtc)
1683 const struct intel_crtc_state *crtc_state =
1684 intel_atomic_get_new_crtc_state(state, crtc);
1685 const struct drm_connector_state *conn_state;
1686 struct drm_connector *conn;
1689 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1690 struct intel_encoder *encoder =
1691 to_intel_encoder(conn_state->best_encoder);
1693 if (conn_state->crtc != &crtc->base)
1696 if (encoder->pre_enable)
1697 encoder->pre_enable(state, encoder,
1698 crtc_state, conn_state);
/*
 * intel_encoders_enable - call ->enable() on every encoder whose
 * connector (new state) is bound to this crtc, then notify OpRegion
 * (ACPI) that the encoder is active.
 */
1702 static void intel_encoders_enable(struct intel_atomic_state *state,
1703 struct intel_crtc *crtc)
1705 const struct intel_crtc_state *crtc_state =
1706 intel_atomic_get_new_crtc_state(state, crtc);
1707 const struct drm_connector_state *conn_state;
1708 struct drm_connector *conn;
1711 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1712 struct intel_encoder *encoder =
1713 to_intel_encoder(conn_state->best_encoder);
1715 if (conn_state->crtc != &crtc->base)
1718 if (encoder->enable)
1719 encoder->enable(state, encoder,
1720 crtc_state, conn_state);
1721 intel_opregion_notify_encoder(encoder, true);
/*
 * intel_encoders_disable - notify OpRegion the encoder is going inactive,
 * then call ->disable() on every encoder whose connector (old state) was
 * bound to this crtc. Note: iterates the OLD connector states.
 */
1725 static void intel_encoders_disable(struct intel_atomic_state *state,
1726 struct intel_crtc *crtc)
1728 const struct intel_crtc_state *old_crtc_state =
1729 intel_atomic_get_old_crtc_state(state, crtc);
1730 const struct drm_connector_state *old_conn_state;
1731 struct drm_connector *conn;
1734 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1735 struct intel_encoder *encoder =
1736 to_intel_encoder(old_conn_state->best_encoder);
1738 if (old_conn_state->crtc != &crtc->base)
1741 intel_opregion_notify_encoder(encoder, false);
1742 if (encoder->disable)
1743 encoder->disable(state, encoder,
1744 old_crtc_state, old_conn_state);
/*
 * intel_encoders_post_disable - call ->post_disable() on every encoder
 * whose connector (old state) was bound to this crtc.
 */
1748 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1749 struct intel_crtc *crtc)
1751 const struct intel_crtc_state *old_crtc_state =
1752 intel_atomic_get_old_crtc_state(state, crtc);
1753 const struct drm_connector_state *old_conn_state;
1754 struct drm_connector *conn;
1757 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1758 struct intel_encoder *encoder =
1759 to_intel_encoder(old_conn_state->best_encoder);
1761 if (old_conn_state->crtc != &crtc->base)
1764 if (encoder->post_disable)
1765 encoder->post_disable(state, encoder,
1766 old_crtc_state, old_conn_state);
/*
 * intel_encoders_post_pll_disable - call ->post_pll_disable() on every
 * encoder whose connector (old state) was bound to this crtc.
 */
1770 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1771 struct intel_crtc *crtc)
1773 const struct intel_crtc_state *old_crtc_state =
1774 intel_atomic_get_old_crtc_state(state, crtc);
1775 const struct drm_connector_state *old_conn_state;
1776 struct drm_connector *conn;
1779 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1780 struct intel_encoder *encoder =
1781 to_intel_encoder(old_conn_state->best_encoder);
1783 if (old_conn_state->crtc != &crtc->base)
1786 if (encoder->post_pll_disable)
1787 encoder->post_pll_disable(state, encoder,
1788 old_crtc_state, old_conn_state);
/*
 * intel_encoders_update_pipe - call ->update_pipe() (fastset path) on
 * every encoder whose connector (new state) is bound to this crtc.
 */
1792 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1793 struct intel_crtc *crtc)
1795 const struct intel_crtc_state *crtc_state =
1796 intel_atomic_get_new_crtc_state(state, crtc);
1797 const struct drm_connector_state *conn_state;
1798 struct drm_connector *conn;
1801 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1802 struct intel_encoder *encoder =
1803 to_intel_encoder(conn_state->best_encoder);
1805 if (conn_state->crtc != &crtc->base)
1808 if (encoder->update_pipe)
1809 encoder->update_pipe(state, encoder,
1810 crtc_state, conn_state);
/*
 * intel_disable_primary_plane - arm disable of the crtc's primary plane.
 * Used during crtc enable so DSPCNTR picks up gamma/csc for the pipe
 * bottom color (see callers).
 */
1814 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1816 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1817 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1819 plane->disable_arm(plane, crtc_state);
/*
 * ilk_crtc_enable - full modeset enable sequence for ILK-family (PCH)
 * pipes: suppress spurious underruns, program DP M/N and transcoder
 * timings, pipeconf, encoder pre-enable, FDI/PCH pre-enable, panel
 * fitter, LUTs, watermarks, transcoder + PCH enable, encoder enable,
 * then double vblank wait before re-enabling underrun reporting.
 * Step order follows the hardware enable sequence — do not reorder.
 */
1822 static void ilk_crtc_enable(struct intel_atomic_state *state,
1823 struct intel_crtc *crtc)
1825 const struct intel_crtc_state *new_crtc_state =
1826 intel_atomic_get_new_crtc_state(state, crtc);
1827 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1828 enum pipe pipe = crtc->pipe;
1830 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1834 * Sometimes spurious CPU pipe underruns happen during FDI
1835 * training, at least with VGA+HDMI cloning. Suppress them.
1837 * On ILK we get an occasional spurious CPU pipe underruns
1838 * between eDP port A enable and vdd enable. Also PCH port
1839 * enable seems to result in the occasional CPU pipe underrun.
1841 * Spurious PCH underruns also occur during PCH enabling.
1843 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1844 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1846 if (intel_crtc_has_dp_encoder(new_crtc_state))
1847 intel_dp_set_m_n(new_crtc_state, M1_N1);
1849 intel_set_transcoder_timings(new_crtc_state);
1850 intel_set_pipe_src_size(new_crtc_state);
1852 if (new_crtc_state->has_pch_encoder)
1853 intel_cpu_transcoder_set_m_n(new_crtc_state,
1854 &new_crtc_state->fdi_m_n, NULL);
1856 ilk_set_pipeconf(new_crtc_state);
1858 crtc->active = true;
1860 intel_encoders_pre_enable(state, crtc);
1862 if (new_crtc_state->has_pch_encoder) {
1863 ilk_pch_pre_enable(state, crtc);
/* Without a PCH encoder, FDI must still be fully off. */
1865 assert_fdi_tx_disabled(dev_priv, pipe);
1866 assert_fdi_rx_disabled(dev_priv, pipe);
1869 ilk_pfit_enable(new_crtc_state);
1872 * On ILK+ LUT must be loaded before the pipe is running but with
1875 intel_color_load_luts(new_crtc_state);
1876 intel_color_commit(new_crtc_state);
1877 /* update DSPCNTR to configure gamma for pipe bottom color */
1878 intel_disable_primary_plane(new_crtc_state);
1880 intel_initial_watermarks(state, crtc);
1881 intel_enable_transcoder(new_crtc_state);
1883 if (new_crtc_state->has_pch_encoder)
1884 ilk_pch_enable(state, crtc);
1886 intel_crtc_vblank_on(new_crtc_state);
1888 intel_encoders_enable(state, crtc);
1890 if (HAS_PCH_CPT(dev_priv))
1891 cpt_verify_modeset(dev_priv, pipe);
1894 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1895 * And a second vblank wait is needed at least on ILK with
1896 * some interlaced HDMI modes. Let's do the double wait always
1897 * in case there are more corner cases we don't know about.
1899 if (new_crtc_state->has_pch_encoder) {
1900 intel_crtc_wait_for_next_vblank(crtc);
1901 intel_crtc_wait_for_next_vblank(crtc);
1903 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1904 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1907 /* IPS only exists on ULT machines and is tied to pipe A. */
1908 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
1910 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
/*
 * glk_pipe_scaler_clock_gating_wa - Display WA #1180 helper: RMW the
 * per-pipe CLKGATE_DIS_PSL register to disable (apply=true) or restore
 * DPF/DPFR clock gating around pipe scaler use on GLK-class hardware.
 * NOTE(review): the set/clear of 'val' happens on lines elided here.
 */
1913 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1914 enum pipe pipe, bool apply)
1916 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1917 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1924 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
/*
 * icl_pipe_mbus_enable - program MBUS DBOX credits for this pipe.
 * A-credits depend on ADL-P joined-mbus state (Wa_22010947358); B/BW
 * credits differ between display ver 12+ and ver 11.
 */
1927 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
1929 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1930 enum pipe pipe = crtc->pipe;
1933 /* Wa_22010947358:adl-p */
1934 if (IS_ALDERLAKE_P(dev_priv))
1935 val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
1937 val = MBUS_DBOX_A_CREDIT(2);
1939 if (DISPLAY_VER(dev_priv) >= 12) {
1940 val |= MBUS_DBOX_BW_CREDIT(2);
1941 val |= MBUS_DBOX_B_CREDIT(12);
1943 val |= MBUS_DBOX_BW_CREDIT(1);
1944 val |= MBUS_DBOX_B_CREDIT(8);
1947 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
/*
 * hsw_set_linetime_wm - write the per-pipe WM_LINETIME register from the
 * precomputed linetime and IPS linetime watermark values.
 */
1950 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1952 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1953 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1955 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1956 HSW_LINETIME(crtc_state->linetime) |
1957 HSW_IPS_LINETIME(crtc_state->ips_linetime));
/*
 * hsw_set_frame_start_delay - program the transcoder CHICKEN_TRANS frame
 * start delay field from dev_priv->framestart_delay (register field is
 * the delay minus one).
 */
1960 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1962 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1963 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1964 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1967 val = intel_de_read(dev_priv, reg);
1968 val &= ~HSW_FRAME_START_DELAY_MASK;
1969 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
1970 intel_de_write(dev_priv, reg, val);
/*
 * icl_ddi_bigjoiner_pre_enable - bigjoiner pre-enable: locate the master
 * crtc/encoder, run pre-pll/pre-enable hooks on the master when called
 * for a slave, enable the shared DPLL, then enable VDSC (skipped during
 * normal pre-enable) and, on display ver 13+, the uncompressed joiner.
 */
1973 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1974 const struct intel_crtc_state *crtc_state)
1976 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1977 struct intel_crtc_state *master_crtc_state;
1978 struct intel_crtc *master_crtc;
1979 struct drm_connector_state *conn_state;
1980 struct drm_connector *conn;
1981 struct intel_encoder *encoder = NULL;
1984 master_crtc = intel_master_crtc(crtc_state);
1985 master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
/* Find the encoder driving the master crtc. */
1987 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1988 if (conn_state->crtc != &master_crtc->base)
1991 encoder = to_intel_encoder(conn_state->best_encoder);
1996 * Enable sequence steps 1-7 on bigjoiner master
1998 if (crtc_state->bigjoiner_slave)
1999 intel_encoders_pre_pll_enable(state, master_crtc);
2001 if (crtc_state->shared_dpll)
2002 intel_enable_shared_dpll(crtc_state);
2004 if (crtc_state->bigjoiner_slave)
2005 intel_encoders_pre_enable(state, master_crtc);
2007 /* need to enable VDSC, which we skipped in pre-enable */
2008 intel_dsc_enable(crtc_state);
2010 if (DISPLAY_VER(dev_priv) >= 13)
2011 intel_uncompressed_joiner_enable(crtc_state);
/*
 * hsw_crtc_enable - full modeset enable sequence for HSW+ (DDI) pipes:
 * encoder pre-pll/pre-enable (or the bigjoiner path), pipe src size and
 * pipemisc, transcoder timings (skipped for bigjoiner slaves / DSI),
 * GLK scaler clock gating WA, panel fitter, LUTs, linetime watermarks,
 * pipe chicken bits, MBUS credits, vblank on, encoder enable, and the
 * HSW dual-vblank workaround pipe. Step order follows the hardware
 * enable sequence — do not reorder.
 */
2014 static void hsw_crtc_enable(struct intel_atomic_state *state,
2015 struct intel_crtc *crtc)
2017 const struct intel_crtc_state *new_crtc_state =
2018 intel_atomic_get_new_crtc_state(state, crtc);
2019 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2020 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
2021 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
2022 bool psl_clkgate_wa;
2024 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2027 if (!new_crtc_state->bigjoiner) {
2028 intel_encoders_pre_pll_enable(state, crtc);
2030 if (new_crtc_state->shared_dpll)
2031 intel_enable_shared_dpll(new_crtc_state);
2033 intel_encoders_pre_enable(state, crtc);
2035 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
2038 intel_set_pipe_src_size(new_crtc_state);
2039 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
2040 bdw_set_pipemisc(new_crtc_state);
2042 if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
2043 intel_set_transcoder_timings(new_crtc_state);
2045 if (cpu_transcoder != TRANSCODER_EDP)
2046 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
2047 new_crtc_state->pixel_multiplier - 1);
2049 if (new_crtc_state->has_pch_encoder)
2050 intel_cpu_transcoder_set_m_n(new_crtc_state,
2051 &new_crtc_state->fdi_m_n, NULL);
2053 hsw_set_frame_start_delay(new_crtc_state);
2055 hsw_set_transconf(new_crtc_state);
2058 crtc->active = true;
2060 /* Display WA #1180: WaDisableScalarClockGating: glk */
2061 psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
2062 new_crtc_state->pch_pfit.enabled;
2064 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
2066 if (DISPLAY_VER(dev_priv) >= 9)
2067 skl_pfit_enable(new_crtc_state);
2069 ilk_pfit_enable(new_crtc_state);
2072 * On ILK+ LUT must be loaded before the pipe is running but with
2075 intel_color_load_luts(new_crtc_state);
2076 intel_color_commit(new_crtc_state);
2077 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
2078 if (DISPLAY_VER(dev_priv) < 9)
2079 intel_disable_primary_plane(new_crtc_state);
2081 hsw_set_linetime_wm(new_crtc_state);
2083 if (DISPLAY_VER(dev_priv) >= 11)
2084 icl_set_pipe_chicken(new_crtc_state);
2086 intel_initial_watermarks(state, crtc);
2088 if (DISPLAY_VER(dev_priv) >= 11) {
2089 const struct intel_dbuf_state *dbuf_state =
2090 intel_atomic_get_new_dbuf_state(state);
2092 icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
2095 if (new_crtc_state->bigjoiner_slave)
2096 intel_crtc_vblank_on(new_crtc_state);
2098 intel_encoders_enable(state, crtc);
2100 if (psl_clkgate_wa) {
2101 intel_crtc_wait_for_next_vblank(crtc);
2102 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
2105 /* If we change the relative order between pipe/planes enabling, we need
2106 * to change the workaround. */
2107 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
2108 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
2109 struct intel_crtc *wa_crtc;
2111 wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
2113 intel_crtc_wait_for_next_vblank(wa_crtc);
2114 intel_crtc_wait_for_next_vblank(wa_crtc);
/*
 * ilk_pfit_disable - disable the ILK-style PCH panel fitter by zeroing
 * PF_CTL and the window pos/size. Skipped entirely when the fitter was
 * not in use (avoids upsetting the HSW power well).
 */
2118 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2120 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2121 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2122 enum pipe pipe = crtc->pipe;
2124 /* To avoid upsetting the power well on haswell only disable the pfit if
2125 * it's in use. The hw state code will make sure we get this right. */
2126 if (!old_crtc_state->pch_pfit.enabled)
2129 intel_de_write(dev_priv, PF_CTL(pipe), 0);
2130 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
2131 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
/*
 * ilk_crtc_disable - modeset disable sequence for ILK-family pipes:
 * suppress spurious underruns, encoder disable, vblank off, transcoder
 * off, panel fitter off, PCH disable/post-disable around the encoder
 * post-disable hooks, then re-enable underrun reporting.
 */
2134 static void ilk_crtc_disable(struct intel_atomic_state *state,
2135 struct intel_crtc *crtc)
2137 const struct intel_crtc_state *old_crtc_state =
2138 intel_atomic_get_old_crtc_state(state, crtc);
2139 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2140 enum pipe pipe = crtc->pipe;
2143 * Sometimes spurious CPU pipe underruns happen when the
2144 * pipe is already disabled, but FDI RX/TX is still enabled.
2145 * Happens at least with VGA+HDMI cloning. Suppress them.
2147 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2148 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2150 intel_encoders_disable(state, crtc);
2152 intel_crtc_vblank_off(old_crtc_state);
2154 intel_disable_transcoder(old_crtc_state);
2156 ilk_pfit_disable(old_crtc_state);
2158 if (old_crtc_state->has_pch_encoder)
2159 ilk_pch_disable(state, crtc);
2161 intel_encoders_post_disable(state, crtc);
2163 if (old_crtc_state->has_pch_encoder)
2164 ilk_pch_post_disable(state, crtc);
2166 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2167 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/*
 * hsw_crtc_disable - modeset disable for HSW+ (DDI) pipes. Encoder
 * disable/post-disable hooks run only on the bigjoiner master (slaves
 * are torn down via the master); the rest of the teardown is handled by
 * the DDI hooks themselves (see FIXME).
 */
2170 static void hsw_crtc_disable(struct intel_atomic_state *state,
2171 struct intel_crtc *crtc)
2173 const struct intel_crtc_state *old_crtc_state =
2174 intel_atomic_get_old_crtc_state(state, crtc);
2177 * FIXME collapse everything to one hook.
2178 * Need care with mst->ddi interactions.
2180 if (!old_crtc_state->bigjoiner_slave) {
2181 intel_encoders_disable(state, crtc);
2182 intel_encoders_post_disable(state, crtc);
2186 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2188 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2189 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2191 if (!crtc_state->gmch_pfit.control)
2195 * The panel fitter should only be adjusted whilst the pipe is disabled,
2196 * according to register description and PRM.
2198 drm_WARN_ON(&dev_priv->drm,
2199 intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
2200 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
2202 intel_de_write(dev_priv, PFIT_PGM_RATIOS,
2203 crtc_state->gmch_pfit.pgm_ratios);
2204 intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
2206 /* Border color in case we don't scale up to the full screen. Black by
2207 * default, change to something else for debugging. */
2208 intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
2211 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2213 if (phy == PHY_NONE)
2215 else if (IS_DG2(dev_priv))
2217 * DG2 outputs labelled as "combo PHY" in the bspec use
2218 * SNPS PHYs with completely different programming,
2219 * hence we always return false here.
2222 else if (IS_ALDERLAKE_S(dev_priv))
2223 return phy <= PHY_E;
2224 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2225 return phy <= PHY_D;
2226 else if (IS_JSL_EHL(dev_priv))
2227 return phy <= PHY_C;
2228 else if (DISPLAY_VER(dev_priv) >= 11)
2229 return phy <= PHY_B;
2234 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2236 if (IS_DG2(dev_priv))
2237 /* DG2's "TC1" output uses a SNPS PHY */
2239 else if (IS_ALDERLAKE_P(dev_priv))
2240 return phy >= PHY_F && phy <= PHY_I;
2241 else if (IS_TIGERLAKE(dev_priv))
2242 return phy >= PHY_D && phy <= PHY_I;
2243 else if (IS_ICELAKE(dev_priv))
2244 return phy >= PHY_C && phy <= PHY_F;
2249 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2251 if (phy == PHY_NONE)
2253 else if (IS_DG2(dev_priv))
2255 * All four "combo" ports and the TC1 port (PHY E) use
2258 return phy <= PHY_E;
2263 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2265 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2266 return PHY_D + port - PORT_D_XELPD;
2267 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2268 return PHY_F + port - PORT_TC1;
2269 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2270 return PHY_B + port - PORT_TC1;
2271 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2272 return PHY_C + port - PORT_TC1;
2273 else if (IS_JSL_EHL(i915) && port == PORT_D)
2276 return PHY_A + port - PORT_A;
2279 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2281 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2282 return TC_PORT_NONE;
2284 if (DISPLAY_VER(dev_priv) >= 12)
2285 return TC_PORT_1 + port - PORT_TC1;
2287 return TC_PORT_1 + port - PORT_C;
2290 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
2294 return POWER_DOMAIN_PORT_DDI_A_LANES;
2296 return POWER_DOMAIN_PORT_DDI_B_LANES;
2298 return POWER_DOMAIN_PORT_DDI_C_LANES;
2300 return POWER_DOMAIN_PORT_DDI_D_LANES;
2302 return POWER_DOMAIN_PORT_DDI_E_LANES;
2304 return POWER_DOMAIN_PORT_DDI_F_LANES;
2306 return POWER_DOMAIN_PORT_DDI_G_LANES;
2308 return POWER_DOMAIN_PORT_DDI_H_LANES;
2310 return POWER_DOMAIN_PORT_DDI_I_LANES;
2313 return POWER_DOMAIN_PORT_OTHER;
2317 enum intel_display_power_domain
2318 intel_aux_power_domain(struct intel_digital_port *dig_port)
2320 if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
2321 switch (dig_port->aux_ch) {
2323 return POWER_DOMAIN_AUX_C_TBT;
2325 return POWER_DOMAIN_AUX_D_TBT;
2327 return POWER_DOMAIN_AUX_E_TBT;
2329 return POWER_DOMAIN_AUX_F_TBT;
2331 return POWER_DOMAIN_AUX_G_TBT;
2333 return POWER_DOMAIN_AUX_H_TBT;
2335 return POWER_DOMAIN_AUX_I_TBT;
2337 MISSING_CASE(dig_port->aux_ch);
2338 return POWER_DOMAIN_AUX_C_TBT;
2342 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
2346 * Converts aux_ch to power_domain without caring about TBT ports for that use
2347 * intel_aux_power_domain()
2349 enum intel_display_power_domain
2350 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
2354 return POWER_DOMAIN_AUX_A;
2356 return POWER_DOMAIN_AUX_B;
2358 return POWER_DOMAIN_AUX_C;
2360 return POWER_DOMAIN_AUX_D;
2362 return POWER_DOMAIN_AUX_E;
2364 return POWER_DOMAIN_AUX_F;
2366 return POWER_DOMAIN_AUX_G;
2368 return POWER_DOMAIN_AUX_H;
2370 return POWER_DOMAIN_AUX_I;
2372 MISSING_CASE(aux_ch);
2373 return POWER_DOMAIN_AUX_A;
2377 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2379 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2380 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2381 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2382 struct drm_encoder *encoder;
2383 enum pipe pipe = crtc->pipe;
2386 if (!crtc_state->hw.active)
2389 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
2390 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
2391 if (crtc_state->pch_pfit.enabled ||
2392 crtc_state->pch_pfit.force_thru)
2393 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
2395 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2396 crtc_state->uapi.encoder_mask) {
2397 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2399 mask |= BIT_ULL(intel_encoder->power_domain);
2402 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2403 mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
2405 if (crtc_state->shared_dpll)
2406 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
2408 if (crtc_state->dsc.compression_enable)
2409 mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
2415 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2417 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2418 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2419 enum intel_display_power_domain domain;
2420 u64 domains, new_domains, old_domains;
2422 domains = get_crtc_power_domains(crtc_state);
2424 new_domains = domains & ~crtc->enabled_power_domains.mask;
2425 old_domains = crtc->enabled_power_domains.mask & ~domains;
2427 for_each_power_domain(domain, new_domains)
2428 intel_display_power_get_in_set(dev_priv,
2429 &crtc->enabled_power_domains,
2435 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2438 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2439 &crtc->enabled_power_domains,
2443 static void valleyview_crtc_enable(struct intel_atomic_state *state,
2444 struct intel_crtc *crtc)
2446 const struct intel_crtc_state *new_crtc_state =
2447 intel_atomic_get_new_crtc_state(state, crtc);
2448 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2449 enum pipe pipe = crtc->pipe;
2451 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2454 if (intel_crtc_has_dp_encoder(new_crtc_state))
2455 intel_dp_set_m_n(new_crtc_state, M1_N1);
2457 intel_set_transcoder_timings(new_crtc_state);
2458 intel_set_pipe_src_size(new_crtc_state);
2460 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2461 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
2462 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
2465 i9xx_set_pipeconf(new_crtc_state);
2467 crtc->active = true;
2469 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2471 intel_encoders_pre_pll_enable(state, crtc);
2473 if (IS_CHERRYVIEW(dev_priv))
2474 chv_enable_pll(new_crtc_state);
2476 vlv_enable_pll(new_crtc_state);
2478 intel_encoders_pre_enable(state, crtc);
2480 i9xx_pfit_enable(new_crtc_state);
2482 intel_color_load_luts(new_crtc_state);
2483 intel_color_commit(new_crtc_state);
2484 /* update DSPCNTR to configure gamma for pipe bottom color */
2485 intel_disable_primary_plane(new_crtc_state);
2487 intel_initial_watermarks(state, crtc);
2488 intel_enable_transcoder(new_crtc_state);
2490 intel_crtc_vblank_on(new_crtc_state);
2492 intel_encoders_enable(state, crtc);
2495 static void i9xx_crtc_enable(struct intel_atomic_state *state,
2496 struct intel_crtc *crtc)
2498 const struct intel_crtc_state *new_crtc_state =
2499 intel_atomic_get_new_crtc_state(state, crtc);
2500 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2501 enum pipe pipe = crtc->pipe;
2503 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2506 if (intel_crtc_has_dp_encoder(new_crtc_state))
2507 intel_dp_set_m_n(new_crtc_state, M1_N1);
2509 intel_set_transcoder_timings(new_crtc_state);
2510 intel_set_pipe_src_size(new_crtc_state);
2512 i9xx_set_pipeconf(new_crtc_state);
2514 crtc->active = true;
2516 if (DISPLAY_VER(dev_priv) != 2)
2517 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2519 intel_encoders_pre_enable(state, crtc);
2521 i9xx_enable_pll(new_crtc_state);
2523 i9xx_pfit_enable(new_crtc_state);
2525 intel_color_load_luts(new_crtc_state);
2526 intel_color_commit(new_crtc_state);
2527 /* update DSPCNTR to configure gamma for pipe bottom color */
2528 intel_disable_primary_plane(new_crtc_state);
2530 if (!intel_initial_watermarks(state, crtc))
2531 intel_update_watermarks(dev_priv);
2532 intel_enable_transcoder(new_crtc_state);
2534 intel_crtc_vblank_on(new_crtc_state);
2536 intel_encoders_enable(state, crtc);
2538 /* prevents spurious underruns */
2539 if (DISPLAY_VER(dev_priv) == 2)
2540 intel_crtc_wait_for_next_vblank(crtc);
2543 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2545 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2546 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2548 if (!old_crtc_state->gmch_pfit.control)
2551 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2553 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2554 intel_de_read(dev_priv, PFIT_CONTROL));
2555 intel_de_write(dev_priv, PFIT_CONTROL, 0);
2558 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2559 struct intel_crtc *crtc)
2561 struct intel_crtc_state *old_crtc_state =
2562 intel_atomic_get_old_crtc_state(state, crtc);
2563 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2564 enum pipe pipe = crtc->pipe;
2567 * On gen2 planes are double buffered but the pipe isn't, so we must
2568 * wait for planes to fully turn off before disabling the pipe.
2570 if (DISPLAY_VER(dev_priv) == 2)
2571 intel_crtc_wait_for_next_vblank(crtc);
2573 intel_encoders_disable(state, crtc);
2575 intel_crtc_vblank_off(old_crtc_state);
2577 intel_disable_transcoder(old_crtc_state);
2579 i9xx_pfit_disable(old_crtc_state);
2581 intel_encoders_post_disable(state, crtc);
2583 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2584 if (IS_CHERRYVIEW(dev_priv))
2585 chv_disable_pll(dev_priv, pipe);
2586 else if (IS_VALLEYVIEW(dev_priv))
2587 vlv_disable_pll(dev_priv, pipe);
2589 i9xx_disable_pll(old_crtc_state);
2592 intel_encoders_post_pll_disable(state, crtc);
2594 if (DISPLAY_VER(dev_priv) != 2)
2595 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2597 if (!dev_priv->wm_disp->initial_watermarks)
2598 intel_update_watermarks(dev_priv);
2600 /* clock the pipe down to 640x480@60 to potentially save power */
2601 if (IS_I830(dev_priv))
2602 i830_enable_pipe(dev_priv, pipe);
2605 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
2606 struct drm_modeset_acquire_ctx *ctx)
2608 struct intel_encoder *encoder;
2609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2610 struct intel_bw_state *bw_state =
2611 to_intel_bw_state(dev_priv->bw_obj.state);
2612 struct intel_cdclk_state *cdclk_state =
2613 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
2614 struct intel_dbuf_state *dbuf_state =
2615 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
2616 struct intel_crtc_state *crtc_state =
2617 to_intel_crtc_state(crtc->base.state);
2618 struct intel_plane *plane;
2619 struct drm_atomic_state *state;
2620 struct intel_crtc_state *temp_crtc_state;
2621 enum pipe pipe = crtc->pipe;
2624 if (!crtc_state->hw.active)
2627 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
2628 const struct intel_plane_state *plane_state =
2629 to_intel_plane_state(plane->base.state);
2631 if (plane_state->uapi.visible)
2632 intel_plane_disable_noatomic(crtc, plane);
2635 state = drm_atomic_state_alloc(&dev_priv->drm);
2637 drm_dbg_kms(&dev_priv->drm,
2638 "failed to disable [CRTC:%d:%s], out of memory",
2639 crtc->base.base.id, crtc->base.name);
2643 state->acquire_ctx = ctx;
2645 /* Everything's already locked, -EDEADLK can't happen. */
2646 temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
2647 ret = drm_atomic_add_affected_connectors(state, &crtc->base);
2649 drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
2651 dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
2653 drm_atomic_state_put(state);
2655 drm_dbg_kms(&dev_priv->drm,
2656 "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
2657 crtc->base.base.id, crtc->base.name);
2659 crtc->active = false;
2660 crtc->base.enabled = false;
2662 drm_WARN_ON(&dev_priv->drm,
2663 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
2664 crtc_state->uapi.active = false;
2665 crtc_state->uapi.connector_mask = 0;
2666 crtc_state->uapi.encoder_mask = 0;
2667 intel_crtc_free_hw_state(crtc_state);
2668 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
2670 for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
2671 encoder->base.crtc = NULL;
2673 intel_fbc_disable(crtc);
2674 intel_update_watermarks(dev_priv);
2675 intel_disable_shared_dpll(crtc_state);
2677 intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
2679 cdclk_state->min_cdclk[pipe] = 0;
2680 cdclk_state->min_voltage_level[pipe] = 0;
2681 cdclk_state->active_pipes &= ~BIT(pipe);
2683 dbuf_state->active_pipes &= ~BIT(pipe);
2685 bw_state->data_rate[pipe] = 0;
2686 bw_state->num_active_planes[pipe] = 0;
2690 * turn all crtc's off, but do not adjust state
2691 * This has to be paired with a call to intel_modeset_setup_hw_state.
2693 int intel_display_suspend(struct drm_device *dev)
2695 struct drm_i915_private *dev_priv = to_i915(dev);
2696 struct drm_atomic_state *state;
2699 if (!HAS_DISPLAY(dev_priv))
2702 state = drm_atomic_helper_suspend(dev);
2703 ret = PTR_ERR_OR_ZERO(state);
2705 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2708 dev_priv->modeset_restore_state = state;
/* Generic encoder destroy hook: clean up DRM state and free the wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
2720 /* Cross check the actual hw state with our own modeset state tracking (and it's
2721 * internal consistency). */
2722 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
2723 struct drm_connector_state *conn_state)
2725 struct intel_connector *connector = to_intel_connector(conn_state->connector);
2726 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2728 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
2729 connector->base.base.id, connector->base.name);
2731 if (connector->get_hw_state(connector)) {
2732 struct intel_encoder *encoder = intel_attached_encoder(connector);
2734 I915_STATE_WARN(!crtc_state,
2735 "connector enabled without attached crtc\n");
2740 I915_STATE_WARN(!crtc_state->hw.active,
2741 "connector is active, but attached crtc isn't\n");
2743 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
2746 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
2747 "atomic encoder doesn't match attached encoder\n");
2749 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
2750 "attached encoder crtc differs from connector crtc\n");
2752 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
2753 "attached crtc is active, but connector isn't\n");
2754 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
2755 "best encoder set without crtc!\n");
2759 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
2761 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2762 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2764 /* IPS only exists on ULT machines and is tied to pipe A. */
2765 if (!hsw_crtc_supports_ips(crtc))
2768 if (!dev_priv->params.enable_ips)
2771 if (crtc_state->pipe_bpp > 24)
2775 * We compare against max which means we must take
2776 * the increased cdclk requirement into account when
2777 * calculating the new cdclk.
2779 * Should measure whether using a lower cdclk w/o IPS
2781 if (IS_BROADWELL(dev_priv) &&
2782 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
2788 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
2790 struct drm_i915_private *dev_priv =
2791 to_i915(crtc_state->uapi.crtc->dev);
2792 struct intel_atomic_state *state =
2793 to_intel_atomic_state(crtc_state->uapi.state);
2795 crtc_state->ips_enabled = false;
2797 if (!hsw_crtc_state_ips_capable(crtc_state))
2801 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
2802 * enabled and disabled dynamically based on package C states,
2803 * user space can't make reliable use of the CRCs, so let's just
2804 * completely disable it.
2806 if (crtc_state->crc_enabled)
2809 /* IPS should be fine as long as at least one plane is enabled. */
2810 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
2813 if (IS_BROADWELL(dev_priv)) {
2814 const struct intel_cdclk_state *cdclk_state;
2816 cdclk_state = intel_atomic_get_cdclk_state(state);
2817 if (IS_ERR(cdclk_state))
2818 return PTR_ERR(cdclk_state);
2820 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
2821 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
2825 crtc_state->ips_enabled = true;
2830 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2832 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2834 /* GDG double wide on either pipe, otherwise pipe A only */
2835 return DISPLAY_VER(dev_priv) < 4 &&
2836 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2839 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2841 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2842 struct drm_rect src;
2845 * We only use IF-ID interlacing. If we ever use
2846 * PF-ID we'll need to adjust the pixel_rate here.
2849 if (!crtc_state->pch_pfit.enabled)
2852 drm_rect_init(&src, 0, 0,
2853 crtc_state->pipe_src_w << 16,
2854 crtc_state->pipe_src_h << 16);
2856 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2860 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2861 const struct drm_display_mode *timings)
2863 mode->hdisplay = timings->crtc_hdisplay;
2864 mode->htotal = timings->crtc_htotal;
2865 mode->hsync_start = timings->crtc_hsync_start;
2866 mode->hsync_end = timings->crtc_hsync_end;
2868 mode->vdisplay = timings->crtc_vdisplay;
2869 mode->vtotal = timings->crtc_vtotal;
2870 mode->vsync_start = timings->crtc_vsync_start;
2871 mode->vsync_end = timings->crtc_vsync_end;
2873 mode->flags = timings->flags;
2874 mode->type = DRM_MODE_TYPE_DRIVER;
2876 mode->clock = timings->crtc_clock;
2878 drm_mode_set_name(mode);
2881 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2883 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2885 if (HAS_GMCH(dev_priv))
2886 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2887 crtc_state->pixel_rate =
2888 crtc_state->hw.pipe_mode.crtc_clock;
2890 crtc_state->pixel_rate =
2891 ilk_pipe_pixel_rate(crtc_state);
2894 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2896 struct drm_display_mode *mode = &crtc_state->hw.mode;
2897 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2898 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2900 drm_mode_copy(pipe_mode, adjusted_mode);
2902 if (crtc_state->bigjoiner) {
2904 * transcoder is programmed to the full mode,
2905 * but pipe timings are half of the transcoder mode
2907 pipe_mode->crtc_hdisplay /= 2;
2908 pipe_mode->crtc_hblank_start /= 2;
2909 pipe_mode->crtc_hblank_end /= 2;
2910 pipe_mode->crtc_hsync_start /= 2;
2911 pipe_mode->crtc_hsync_end /= 2;
2912 pipe_mode->crtc_htotal /= 2;
2913 pipe_mode->crtc_clock /= 2;
2916 if (crtc_state->splitter.enable) {
2917 int n = crtc_state->splitter.link_count;
2918 int overlap = crtc_state->splitter.pixel_overlap;
2921 * eDP MSO uses segment timings from EDID for transcoder
2922 * timings, but full mode for everything else.
2924 * h_full = (h_segment - pixel_overlap) * link_count
2926 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
2927 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
2928 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
2929 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
2930 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
2931 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
2932 pipe_mode->crtc_clock *= n;
2934 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2935 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2937 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2938 intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
2941 intel_crtc_compute_pixel_rate(crtc_state);
2943 drm_mode_copy(mode, adjusted_mode);
2944 mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
2945 mode->vdisplay = crtc_state->pipe_src_h;
2948 static void intel_encoder_get_config(struct intel_encoder *encoder,
2949 struct intel_crtc_state *crtc_state)
2951 encoder->get_config(encoder, crtc_state);
2953 intel_crtc_readout_derived_state(crtc_state);
2956 static int intel_crtc_compute_config(struct intel_crtc *crtc,
2957 struct intel_crtc_state *pipe_config)
2959 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2960 struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
2961 int clock_limit = dev_priv->max_dotclk_freq;
2963 drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
2965 /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
2966 if (pipe_config->bigjoiner) {
2967 pipe_mode->crtc_clock /= 2;
2968 pipe_mode->crtc_hdisplay /= 2;
2969 pipe_mode->crtc_hblank_start /= 2;
2970 pipe_mode->crtc_hblank_end /= 2;
2971 pipe_mode->crtc_hsync_start /= 2;
2972 pipe_mode->crtc_hsync_end /= 2;
2973 pipe_mode->crtc_htotal /= 2;
2974 pipe_config->pipe_src_w /= 2;
2977 if (pipe_config->splitter.enable) {
2978 int n = pipe_config->splitter.link_count;
2979 int overlap = pipe_config->splitter.pixel_overlap;
2981 pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
2982 pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
2983 pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
2984 pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
2985 pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
2986 pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
2987 pipe_mode->crtc_clock *= n;
2990 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2992 if (DISPLAY_VER(dev_priv) < 4) {
2993 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
2996 * Enable double wide mode when the dot clock
2997 * is > 90% of the (display) core speed.
2999 if (intel_crtc_supports_double_wide(crtc) &&
3000 pipe_mode->crtc_clock > clock_limit) {
3001 clock_limit = dev_priv->max_dotclk_freq;
3002 pipe_config->double_wide = true;
3006 if (pipe_mode->crtc_clock > clock_limit) {
3007 drm_dbg_kms(&dev_priv->drm,
3008 "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
3009 pipe_mode->crtc_clock, clock_limit,
3010 yesno(pipe_config->double_wide));
3015 * Pipe horizontal size must be even in:
3017 * - LVDS dual channel mode
3018 * - Double wide pipe
3020 if (pipe_config->pipe_src_w & 1) {
3021 if (pipe_config->double_wide) {
3022 drm_dbg_kms(&dev_priv->drm,
3023 "Odd pipe source width not supported with double wide pipe\n");
3027 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
3028 intel_is_dual_link_lvds(dev_priv)) {
3029 drm_dbg_kms(&dev_priv->drm,
3030 "Odd pipe source width not supported with dual link LVDS\n");
3035 intel_crtc_compute_pixel_rate(pipe_config);
3037 if (pipe_config->has_pch_encoder)
3038 return ilk_fdi_compute_config(crtc, pipe_config);
3044 intel_reduce_m_n_ratio(u32 *num, u32 *den)
3046 while (*num > DATA_LINK_M_N_MASK ||
3047 *den > DATA_LINK_M_N_MASK) {
3053 static void compute_m_n(unsigned int m, unsigned int n,
3054 u32 *ret_m, u32 *ret_n,
3058 * Several DP dongles in particular seem to be fussy about
3059 * too large link M/N values. Give N value as 0x8000 that
3060 * should be acceptable by specific devices. 0x8000 is the
3061 * specified fixed N value for asynchronous clock mode,
3062 * which the devices expect also in synchronous clock mode.
3065 *ret_n = DP_LINK_CONSTANT_N_VALUE;
3067 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
3069 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
3070 intel_reduce_m_n_ratio(ret_m, ret_n);
3074 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
3075 int pixel_clock, int link_clock,
3076 struct intel_link_m_n *m_n,
3077 bool constant_n, bool fec_enable)
3079 u32 data_clock = bits_per_pixel * pixel_clock;
3082 data_clock = intel_dp_mode_to_fec_clock(data_clock);
3085 compute_m_n(data_clock,
3086 link_clock * nlanes * 8,
3087 &m_n->gmch_m, &m_n->gmch_n,
3090 compute_m_n(pixel_clock, link_clock,
3091 &m_n->link_m, &m_n->link_n,
3095 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
3098 * There may be no VBT; and if the BIOS enabled SSC we can
3099 * just keep using it to avoid unnecessary flicker. Whereas if the
3100 * BIOS isn't using it, don't assume it will work even if the VBT
3101 * indicates as much.
3103 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
3104 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
3108 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
3109 drm_dbg_kms(&dev_priv->drm,
3110 "SSC %s by BIOS, overriding VBT which says %s\n",
3111 enableddisabled(bios_lvds_use_ssc),
3112 enableddisabled(dev_priv->vbt.lvds_use_ssc));
3113 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
3118 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3119 const struct intel_link_m_n *m_n)
3121 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3122 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3123 enum pipe pipe = crtc->pipe;
3125 intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
3126 TU_SIZE(m_n->tu) | m_n->gmch_m);
3127 intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
3128 intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
3129 intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
3132 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
3133 enum transcoder transcoder)
3135 if (IS_HASWELL(dev_priv))
3136 return transcoder == TRANSCODER_EDP;
3139 * Strictly speaking some registers are available before
3140 * gen7, but we only support DRRS on gen7+
3142 return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
3145 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3146 const struct intel_link_m_n *m_n,
3147 const struct intel_link_m_n *m2_n2)
3149 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3150 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3151 enum pipe pipe = crtc->pipe;
3152 enum transcoder transcoder = crtc_state->cpu_transcoder;
3154 if (DISPLAY_VER(dev_priv) >= 5) {
3155 intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
3156 TU_SIZE(m_n->tu) | m_n->gmch_m);
3157 intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
3159 intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
3161 intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
3164 * M2_N2 registers are set only if DRRS is supported
3165 * (to make sure the registers are not unnecessarily accessed).
3167 if (m2_n2 && crtc_state->has_drrs &&
3168 transcoder_has_m2_n2(dev_priv, transcoder)) {
3169 intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
3170 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
3171 intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
3173 intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
3175 intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
3179 intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
3180 TU_SIZE(m_n->tu) | m_n->gmch_m);
3181 intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
3182 intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
3183 intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
3187 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
3189 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
3190 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3193 dp_m_n = &crtc_state->dp_m_n;
3194 dp_m2_n2 = &crtc_state->dp_m2_n2;
3195 } else if (m_n == M2_N2) {
3198 * M2_N2 registers are not supported. Hence m2_n2 divider value
3199 * needs to be programmed into M1_N1.
3201 dp_m_n = &crtc_state->dp_m2_n2;
3203 drm_err(&i915->drm, "Unsupported divider value\n");
3207 if (crtc_state->has_pch_encoder)
3208 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
3210 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
3213 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
3215 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3216 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3217 enum pipe pipe = crtc->pipe;
3218 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3219 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
3220 u32 crtc_vtotal, crtc_vblank_end;
3223 /* We need to be careful not to changed the adjusted mode, for otherwise
3224 * the hw state checker will get angry at the mismatch. */
3225 crtc_vtotal = adjusted_mode->crtc_vtotal;
3226 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
3228 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3229 /* the chip adds 2 halflines automatically */
3231 crtc_vblank_end -= 1;
3233 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3234 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
3236 vsyncshift = adjusted_mode->crtc_hsync_start -
3237 adjusted_mode->crtc_htotal / 2;
3239 vsyncshift += adjusted_mode->crtc_htotal;
3242 if (DISPLAY_VER(dev_priv) > 3)
3243 intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
3246 intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
3247 (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
3248 intel_de_write(dev_priv, HBLANK(cpu_transcoder),
3249 (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
3250 intel_de_write(dev_priv, HSYNC(cpu_transcoder),
3251 (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
3253 intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
3254 (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
3255 intel_de_write(dev_priv, VBLANK(cpu_transcoder),
3256 (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
3257 intel_de_write(dev_priv, VSYNC(cpu_transcoder),
3258 (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
3260 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
3261 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
3262 * documented on the DDI_FUNC_CTL register description, EDP Input Select
3264 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
3265 (pipe == PIPE_B || pipe == PIPE_C))
3266 intel_de_write(dev_priv, VTOTAL(pipe),
3267 intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
3271 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
3273 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3274 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3275 enum pipe pipe = crtc->pipe;
3277 /* pipesrc controls the size that is scaled from, which should
3278 * always be the user's requested size.
3280 intel_de_write(dev_priv, PIPESRC(pipe),
3281 ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
3284 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3286 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3287 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3289 if (DISPLAY_VER(dev_priv) == 2)
3292 if (DISPLAY_VER(dev_priv) >= 9 ||
3293 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3294 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3296 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
3299 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
3300 struct intel_crtc_state *pipe_config)
3302 struct drm_device *dev = crtc->base.dev;
3303 struct drm_i915_private *dev_priv = to_i915(dev);
3304 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
3307 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
3308 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
3309 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
3311 if (!transcoder_is_dsi(cpu_transcoder)) {
3312 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
3313 pipe_config->hw.adjusted_mode.crtc_hblank_start =
3315 pipe_config->hw.adjusted_mode.crtc_hblank_end =
3316 ((tmp >> 16) & 0xffff) + 1;
3318 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
3319 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
3320 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
3322 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
3323 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
3324 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
3326 if (!transcoder_is_dsi(cpu_transcoder)) {
3327 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
3328 pipe_config->hw.adjusted_mode.crtc_vblank_start =
3330 pipe_config->hw.adjusted_mode.crtc_vblank_end =
3331 ((tmp >> 16) & 0xffff) + 1;
3333 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
3334 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
3335 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
3337 if (intel_pipe_is_interlaced(pipe_config)) {
3338 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
3339 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
3340 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
3344 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3345 struct intel_crtc_state *pipe_config)
3347 struct drm_device *dev = crtc->base.dev;
3348 struct drm_i915_private *dev_priv = to_i915(dev);
3351 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3352 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
3353 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
3356 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3358 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3359 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3364 /* we keep both pipes enabled on 830 */
3365 if (IS_I830(dev_priv))
3366 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
3368 if (crtc_state->double_wide)
3369 pipeconf |= PIPECONF_DOUBLE_WIDE;
3371 /* only g4x and later have fancy bpc/dither controls */
3372 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3373 IS_CHERRYVIEW(dev_priv)) {
3374 /* Bspec claims that we can't use dithering for 30bpp pipes. */
3375 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3376 pipeconf |= PIPECONF_DITHER_EN |
3377 PIPECONF_DITHER_TYPE_SP;
3379 switch (crtc_state->pipe_bpp) {
3381 pipeconf |= PIPECONF_6BPC;
3384 pipeconf |= PIPECONF_8BPC;
3387 pipeconf |= PIPECONF_10BPC;
3390 /* Case prevented by intel_choose_pipe_bpp_dither. */
3395 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3396 if (DISPLAY_VER(dev_priv) < 4 ||
3397 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3398 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3400 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3402 pipeconf |= PIPECONF_PROGRESSIVE;
3405 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3406 crtc_state->limited_color_range)
3407 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3409 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3411 pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3413 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
3414 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
3417 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3419 if (IS_I830(dev_priv))
3422 return DISPLAY_VER(dev_priv) >= 4 ||
3423 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3426 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3428 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3429 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3432 if (!i9xx_has_pfit(dev_priv))
3435 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3436 if (!(tmp & PFIT_ENABLE))
3439 /* Check whether the pfit is attached to our pipe. */
3440 if (DISPLAY_VER(dev_priv) < 4) {
3441 if (crtc->pipe != PIPE_B)
3444 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3448 crtc_state->gmch_pfit.control = tmp;
3449 crtc_state->gmch_pfit.pgm_ratios =
3450 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
3453 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3454 struct intel_crtc_state *pipe_config)
3456 struct drm_device *dev = crtc->base.dev;
3457 struct drm_i915_private *dev_priv = to_i915(dev);
3458 enum pipe pipe = crtc->pipe;
3461 int refclk = 100000;
3463 /* In case of DSI, DPLL will not be used */
3464 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3467 vlv_dpio_get(dev_priv);
3468 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3469 vlv_dpio_put(dev_priv);
3471 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3472 clock.m2 = mdiv & DPIO_M2DIV_MASK;
3473 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3474 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3475 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3477 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
3480 static void chv_crtc_clock_get(struct intel_crtc *crtc,
3481 struct intel_crtc_state *pipe_config)
3483 struct drm_device *dev = crtc->base.dev;
3484 struct drm_i915_private *dev_priv = to_i915(dev);
3485 enum pipe pipe = crtc->pipe;
3486 enum dpio_channel port = vlv_pipe_to_channel(pipe);
3488 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
3489 int refclk = 100000;
3491 /* In case of DSI, DPLL will not be used */
3492 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3495 vlv_dpio_get(dev_priv);
3496 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
3497 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
3498 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
3499 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
3500 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
3501 vlv_dpio_put(dev_priv);
3503 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
3504 clock.m2 = (pll_dw0 & 0xff) << 22;
3505 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
3506 clock.m2 |= pll_dw2 & 0x3fffff;
3507 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
3508 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
3509 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
3511 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
3514 static enum intel_output_format
3515 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3517 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3520 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3522 if (tmp & PIPEMISC_YUV420_ENABLE) {
3523 /* We support 4:2:0 in full blend mode only */
3524 drm_WARN_ON(&dev_priv->drm,
3525 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3527 return INTEL_OUTPUT_FORMAT_YCBCR420;
3528 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3529 return INTEL_OUTPUT_FORMAT_YCBCR444;
3531 return INTEL_OUTPUT_FORMAT_RGB;
3535 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3537 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3538 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3539 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3540 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3543 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3545 if (tmp & DISP_PIPE_GAMMA_ENABLE)
3546 crtc_state->gamma_enable = true;
3548 if (!HAS_GMCH(dev_priv) &&
3549 tmp & DISP_PIPE_CSC_ENABLE)
3550 crtc_state->csc_enable = true;
3553 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3554 struct intel_crtc_state *pipe_config)
3556 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3557 enum intel_display_power_domain power_domain;
3558 intel_wakeref_t wakeref;
3562 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3563 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3567 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3568 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3569 pipe_config->shared_dpll = NULL;
3573 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3574 if (!(tmp & PIPECONF_ENABLE))
3577 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3578 IS_CHERRYVIEW(dev_priv)) {
3579 switch (tmp & PIPECONF_BPC_MASK) {
3581 pipe_config->pipe_bpp = 18;
3584 pipe_config->pipe_bpp = 24;
3586 case PIPECONF_10BPC:
3587 pipe_config->pipe_bpp = 30;
3594 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3595 (tmp & PIPECONF_COLOR_RANGE_SELECT))
3596 pipe_config->limited_color_range = true;
3598 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
3599 PIPECONF_GAMMA_MODE_SHIFT;
3601 if (IS_CHERRYVIEW(dev_priv))
3602 pipe_config->cgm_mode = intel_de_read(dev_priv,
3603 CGM_PIPE_MODE(crtc->pipe));
3605 i9xx_get_pipe_color_config(pipe_config);
3606 intel_color_get_config(pipe_config);
3608 if (DISPLAY_VER(dev_priv) < 4)
3609 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
3611 intel_get_transcoder_timings(crtc, pipe_config);
3612 intel_get_pipe_src_size(crtc, pipe_config);
3614 i9xx_get_pfit_config(pipe_config);
3616 if (DISPLAY_VER(dev_priv) >= 4) {
3617 /* No way to read it out on pipes B and C */
3618 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
3619 tmp = dev_priv->chv_dpll_md[crtc->pipe];
3621 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
3622 pipe_config->pixel_multiplier =
3623 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3624 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3625 pipe_config->dpll_hw_state.dpll_md = tmp;
3626 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3627 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3628 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
3629 pipe_config->pixel_multiplier =
3630 ((tmp & SDVO_MULTIPLIER_MASK)
3631 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3633 /* Note that on i915G/GM the pixel multiplier is in the sdvo
3634 * port and will be fixed up in the encoder->get_config
3636 pipe_config->pixel_multiplier = 1;
3638 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
3640 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
3641 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
3643 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
3646 /* Mask out read-only status bits. */
3647 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
3648 DPLL_PORTC_READY_MASK |
3649 DPLL_PORTB_READY_MASK);
3652 if (IS_CHERRYVIEW(dev_priv))
3653 chv_crtc_clock_get(crtc, pipe_config);
3654 else if (IS_VALLEYVIEW(dev_priv))
3655 vlv_crtc_clock_get(crtc, pipe_config);
3657 i9xx_crtc_clock_get(crtc, pipe_config);
3660 * Normally the dotclock is filled in by the encoder .get_config()
3661 * but in case the pipe is enabled w/o any ports we need a sane
3664 pipe_config->hw.adjusted_mode.crtc_clock =
3665 pipe_config->port_clock / pipe_config->pixel_multiplier;
3670 intel_display_power_put(dev_priv, power_domain, wakeref);
3675 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3677 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3678 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3679 enum pipe pipe = crtc->pipe;
3684 switch (crtc_state->pipe_bpp) {
3686 val |= PIPECONF_6BPC;
3689 val |= PIPECONF_8BPC;
3692 val |= PIPECONF_10BPC;
3695 val |= PIPECONF_12BPC;
3698 /* Case prevented by intel_choose_pipe_bpp_dither. */
3702 if (crtc_state->dither)
3703 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3705 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3706 val |= PIPECONF_INTERLACED_ILK;
3708 val |= PIPECONF_PROGRESSIVE;
3711 * This would end up with an odd purple hue over
3712 * the entire display. Make sure we don't do it.
3714 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3715 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3717 if (crtc_state->limited_color_range &&
3718 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3719 val |= PIPECONF_COLOR_RANGE_SELECT;
3721 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3722 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
3724 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
3726 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3728 intel_de_write(dev_priv, PIPECONF(pipe), val);
3729 intel_de_posting_read(dev_priv, PIPECONF(pipe));
3732 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3734 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3735 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3736 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3739 if (IS_HASWELL(dev_priv) && crtc_state->dither)
3740 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3742 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3743 val |= PIPECONF_INTERLACED_ILK;
3745 val |= PIPECONF_PROGRESSIVE;
3747 if (IS_HASWELL(dev_priv) &&
3748 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3749 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3751 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3752 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
3755 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
3757 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3758 const struct intel_crtc_scaler_state *scaler_state =
3759 &crtc_state->scaler_state;
3761 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3765 switch (crtc_state->pipe_bpp) {
3767 val |= PIPEMISC_6_BPC;
3770 val |= PIPEMISC_8_BPC;
3773 val |= PIPEMISC_10_BPC;
3776 /* Port output 12BPC defined for ADLP+ */
3777 if (DISPLAY_VER(dev_priv) > 12)
3778 val |= PIPEMISC_12_BPC_ADLP;
3781 MISSING_CASE(crtc_state->pipe_bpp);
3785 if (crtc_state->dither)
3786 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
3788 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3789 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3790 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
3792 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3793 val |= PIPEMISC_YUV420_ENABLE |
3794 PIPEMISC_YUV420_MODE_FULL_BLEND;
3796 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3797 val |= PIPEMISC_HDR_MODE_PRECISION;
3799 if (DISPLAY_VER(dev_priv) >= 12)
3800 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
3802 if (IS_ALDERLAKE_P(dev_priv)) {
3803 bool scaler_in_use = false;
3805 for (i = 0; i < crtc->num_scalers; i++) {
3806 if (!scaler_state->scalers[i].in_use)
3809 scaler_in_use = true;
3813 intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
3814 PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
3815 scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
3816 PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
3819 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
3822 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
3824 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3827 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3829 switch (tmp & PIPEMISC_BPC_MASK) {
3830 case PIPEMISC_6_BPC:
3832 case PIPEMISC_8_BPC:
3834 case PIPEMISC_10_BPC:
3837 * PORT OUTPUT 12 BPC defined for ADLP+.
3840 * For previous platforms with DSI interface, bits 5:7
3841 * are used for storing pipe_bpp irrespective of dithering.
3842 * Since the value of 12 BPC is not defined for these bits
3843 * on older platforms, need to find a workaround for 12 BPC
3844 * MIPI DSI HW readout.
3846 case PIPEMISC_12_BPC_ADLP:
3847 if (DISPLAY_VER(dev_priv) > 12)
/*
 * ilk_get_lanes_required - minimum FDI lane count for a mode
 * @target_clock: pixel clock of the mode
 * @link_bw: per-lane link clock
 * @bpp: bits per pixel carried on the link
 *
 * Returns the number of lanes needed to carry the payload, including a
 * spread-spectrum safety margin.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid oversubscribing the
	 * link. Max center spread is 2.5%; use 5% for safety's sake.
	 */
	unsigned int payload_bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bps = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (payload_bps + lane_bps - 1) / lane_bps;
}
3867 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
3868 struct intel_link_m_n *m_n)
3870 struct drm_device *dev = crtc->base.dev;
3871 struct drm_i915_private *dev_priv = to_i915(dev);
3872 enum pipe pipe = crtc->pipe;
3874 m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
3875 m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
3876 m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
3878 m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
3879 m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
3880 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3883 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
3884 enum transcoder transcoder,
3885 struct intel_link_m_n *m_n,
3886 struct intel_link_m_n *m2_n2)
3888 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3889 enum pipe pipe = crtc->pipe;
3891 if (DISPLAY_VER(dev_priv) >= 5) {
3892 m_n->link_m = intel_de_read(dev_priv,
3893 PIPE_LINK_M1(transcoder));
3894 m_n->link_n = intel_de_read(dev_priv,
3895 PIPE_LINK_N1(transcoder));
3896 m_n->gmch_m = intel_de_read(dev_priv,
3897 PIPE_DATA_M1(transcoder))
3899 m_n->gmch_n = intel_de_read(dev_priv,
3900 PIPE_DATA_N1(transcoder));
3901 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
3902 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3904 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
3905 m2_n2->link_m = intel_de_read(dev_priv,
3906 PIPE_LINK_M2(transcoder));
3907 m2_n2->link_n = intel_de_read(dev_priv,
3908 PIPE_LINK_N2(transcoder));
3909 m2_n2->gmch_m = intel_de_read(dev_priv,
3910 PIPE_DATA_M2(transcoder))
3912 m2_n2->gmch_n = intel_de_read(dev_priv,
3913 PIPE_DATA_N2(transcoder));
3914 m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
3915 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3918 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
3919 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
3920 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3922 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
3923 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3924 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3928 void intel_dp_get_m_n(struct intel_crtc *crtc,
3929 struct intel_crtc_state *pipe_config)
3931 if (pipe_config->has_pch_encoder)
3932 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
3934 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3935 &pipe_config->dp_m_n,
3936 &pipe_config->dp_m2_n2);
3939 void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
3940 struct intel_crtc_state *pipe_config)
3942 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3943 &pipe_config->fdi_m_n, NULL);
3946 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3949 drm_rect_init(&crtc_state->pch_pfit.dst,
3950 pos >> 16, pos & 0xffff,
3951 size >> 16, size & 0xffff);
3954 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
3956 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3957 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3958 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
3962 /* find scaler attached to this pipe */
3963 for (i = 0; i < crtc->num_scalers; i++) {
3966 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
3967 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
3971 crtc_state->pch_pfit.enabled = true;
3973 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
3974 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
3976 ilk_get_pfit_pos_size(crtc_state, pos, size);
3978 scaler_state->scalers[i].in_use = true;
3982 scaler_state->scaler_id = id;
3984 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
3986 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
3989 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3991 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3992 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3995 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3996 if ((ctl & PF_ENABLE) == 0)
3999 crtc_state->pch_pfit.enabled = true;
4001 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
4002 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
4004 ilk_get_pfit_pos_size(crtc_state, pos, size);
4007 * We currently do not free assignements of panel fitters on
4008 * ivb/hsw (since we don't use the higher upscaling modes which
4009 * differentiates them) so just WARN about this case for now.
4011 drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
4012 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
4015 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
4016 struct intel_crtc_state *pipe_config)
4018 struct drm_device *dev = crtc->base.dev;
4019 struct drm_i915_private *dev_priv = to_i915(dev);
4020 enum intel_display_power_domain power_domain;
4021 intel_wakeref_t wakeref;
4025 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
4026 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4030 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4031 pipe_config->shared_dpll = NULL;
4034 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4035 if (!(tmp & PIPECONF_ENABLE))
4038 switch (tmp & PIPECONF_BPC_MASK) {
4040 pipe_config->pipe_bpp = 18;
4043 pipe_config->pipe_bpp = 24;
4045 case PIPECONF_10BPC:
4046 pipe_config->pipe_bpp = 30;
4048 case PIPECONF_12BPC:
4049 pipe_config->pipe_bpp = 36;
4055 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
4056 pipe_config->limited_color_range = true;
4058 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
4059 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
4060 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
4061 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4064 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4068 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
4069 PIPECONF_GAMMA_MODE_SHIFT;
4071 pipe_config->csc_mode = intel_de_read(dev_priv,
4072 PIPE_CSC_MODE(crtc->pipe));
4074 i9xx_get_pipe_color_config(pipe_config);
4075 intel_color_get_config(pipe_config);
4077 pipe_config->pixel_multiplier = 1;
4079 ilk_pch_get_config(pipe_config);
4081 intel_get_transcoder_timings(crtc, pipe_config);
4082 intel_get_pipe_src_size(crtc, pipe_config);
4084 ilk_get_pfit_config(pipe_config);
4089 intel_display_power_put(dev_priv, power_domain, wakeref);
4094 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
4096 if (DISPLAY_VER(i915) >= 12)
4097 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
4098 else if (DISPLAY_VER(i915) >= 11)
4099 return BIT(PIPE_B) | BIT(PIPE_C);
4104 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
4105 enum transcoder cpu_transcoder)
4107 enum intel_display_power_domain power_domain;
4108 intel_wakeref_t wakeref;
4111 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4113 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4114 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4116 return tmp & TRANS_DDI_FUNC_ENABLE;
4119 static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
4121 u8 master_pipes = 0, slave_pipes = 0;
4122 struct intel_crtc *crtc;
4124 for_each_intel_crtc(&dev_priv->drm, crtc) {
4125 enum intel_display_power_domain power_domain;
4126 enum pipe pipe = crtc->pipe;
4127 intel_wakeref_t wakeref;
4129 if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
4132 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
4133 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4134 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4136 if (!(tmp & BIG_JOINER_ENABLE))
4139 if (tmp & MASTER_BIG_JOINER_ENABLE)
4140 master_pipes |= BIT(pipe);
4142 slave_pipes |= BIT(pipe);
4145 if (DISPLAY_VER(dev_priv) < 13)
4148 power_domain = POWER_DOMAIN_PIPE(pipe);
4149 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4150 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4152 if (tmp & UNCOMPRESSED_JOINER_MASTER)
4153 master_pipes |= BIT(pipe);
4154 if (tmp & UNCOMPRESSED_JOINER_SLAVE)
4155 slave_pipes |= BIT(pipe);
4159 /* Bigjoiner pipes should always be consecutive master and slave */
4160 drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
4161 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
4162 master_pipes, slave_pipes);
4167 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
4169 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4171 if (DISPLAY_VER(i915) >= 11)
4172 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4174 return panel_transcoder_mask;
4177 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
4179 struct drm_device *dev = crtc->base.dev;
4180 struct drm_i915_private *dev_priv = to_i915(dev);
4181 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
4182 enum transcoder cpu_transcoder;
4183 u8 enabled_transcoders = 0;
4186 * XXX: Do intel_display_power_get_if_enabled before reading this (for
4187 * consistency and less surprising code; it's in always on power).
4189 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
4190 panel_transcoder_mask) {
4191 enum intel_display_power_domain power_domain;
4192 intel_wakeref_t wakeref;
4193 enum pipe trans_pipe;
4196 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4197 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4198 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4200 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
4203 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
4206 "unknown pipe linked to transcoder %s\n",
4207 transcoder_name(cpu_transcoder));
4209 case TRANS_DDI_EDP_INPUT_A_ONOFF:
4210 case TRANS_DDI_EDP_INPUT_A_ON:
4211 trans_pipe = PIPE_A;
4213 case TRANS_DDI_EDP_INPUT_B_ONOFF:
4214 trans_pipe = PIPE_B;
4216 case TRANS_DDI_EDP_INPUT_C_ONOFF:
4217 trans_pipe = PIPE_C;
4219 case TRANS_DDI_EDP_INPUT_D_ONOFF:
4220 trans_pipe = PIPE_D;
4224 if (trans_pipe == crtc->pipe)
4225 enabled_transcoders |= BIT(cpu_transcoder);
4228 /* single pipe or bigjoiner master */
4229 cpu_transcoder = (enum transcoder) crtc->pipe;
4230 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4231 enabled_transcoders |= BIT(cpu_transcoder);
4233 /* bigjoiner slave -> consider the master pipe's transcoder as well */
4234 if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
4235 cpu_transcoder = (enum transcoder) crtc->pipe - 1;
4236 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4237 enabled_transcoders |= BIT(cpu_transcoder);
4240 return enabled_transcoders;
4243 static bool has_edp_transcoders(u8 enabled_transcoders)
4245 return enabled_transcoders & BIT(TRANSCODER_EDP);
4248 static bool has_dsi_transcoders(u8 enabled_transcoders)
4250 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4251 BIT(TRANSCODER_DSI_1));
4254 static bool has_pipe_transcoders(u8 enabled_transcoders)
4256 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4257 BIT(TRANSCODER_DSI_0) |
4258 BIT(TRANSCODER_DSI_1));
4261 static void assert_enabled_transcoders(struct drm_i915_private *i915,
4262 u8 enabled_transcoders)
4264 /* Only one type of transcoder please */
4265 drm_WARN_ON(&i915->drm,
4266 has_edp_transcoders(enabled_transcoders) +
4267 has_dsi_transcoders(enabled_transcoders) +
4268 has_pipe_transcoders(enabled_transcoders) > 1);
4270 /* Only DSI transcoders can be ganged */
4271 drm_WARN_ON(&i915->drm,
4272 !has_dsi_transcoders(enabled_transcoders) &&
4273 !is_power_of_2(enabled_transcoders));
4276 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
4277 struct intel_crtc_state *pipe_config,
4278 struct intel_display_power_domain_set *power_domain_set)
4280 struct drm_device *dev = crtc->base.dev;
4281 struct drm_i915_private *dev_priv = to_i915(dev);
4282 unsigned long enabled_transcoders;
4285 enabled_transcoders = hsw_enabled_transcoders(crtc);
4286 if (!enabled_transcoders)
4289 assert_enabled_transcoders(dev_priv, enabled_transcoders);
4292 * With the exception of DSI we should only ever have
4293 * a single enabled transcoder. With DSI let's just
4294 * pick the first one.
4296 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
4298 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4299 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
4302 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
4303 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4305 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
4306 pipe_config->pch_pfit.force_thru = true;
4309 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
4311 return tmp & PIPECONF_ENABLE;
4314 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
4315 struct intel_crtc_state *pipe_config,
4316 struct intel_display_power_domain_set *power_domain_set)
4318 struct drm_device *dev = crtc->base.dev;
4319 struct drm_i915_private *dev_priv = to_i915(dev);
4320 enum transcoder cpu_transcoder;
4324 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
4326 cpu_transcoder = TRANSCODER_DSI_A;
4328 cpu_transcoder = TRANSCODER_DSI_C;
4330 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4331 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
4335 * The PLL needs to be enabled with a valid divider
4336 * configuration, otherwise accessing DSI registers will hang
4337 * the machine. See BSpec North Display Engine
4338 * registers/MIPI[BXT]. We can break out here early, since we
4339 * need the same DSI PLL to be enabled for both DSI ports.
4341 if (!bxt_dsi_pll_is_enabled(dev_priv))
4344 /* XXX: this works for video mode only */
4345 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
4346 if (!(tmp & DPI_ENABLE))
4349 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
4350 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
4353 pipe_config->cpu_transcoder = cpu_transcoder;
4357 return transcoder_is_dsi(pipe_config->cpu_transcoder);
/*
 * Read out the full hardware state of a HSW+ pipe into @pipe_config:
 * transcoder state, DSC/joiner, timings, VRR, source size, output
 * format, color management, linetime watermarks, panel fitter, IPS and
 * pixel multiplier.  Power domain references are collected in a local
 * set and released before returning.  Returns whether the pipe is
 * active.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
	pipe_config->shared_dpll = NULL;
	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
	/* On BXT/GLK a pipe may instead be driven by a DSI transcoder. */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
	intel_dsc_get_config(pipe_config);
	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
		intel_uncompressed_joiner_get_config(pipe_config);
	/* Pre-gen11 DSI transcoders have no readable timings. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);
	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));
		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		/* BDW+ encode the output format in PIPEMISC instead. */
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));
	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));
	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;
		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
		i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);
	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
			ilk_get_pfit_config(pipe_config);
	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * modeset.
			 */
			pipe_config->ips_enabled = true;
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
		pipe_config->pixel_multiplier = 1;
	/* Drop every power reference taken during readout. */
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
4474 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4476 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4477 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4479 if (!i915->display->get_pipe_config(crtc, crtc_state))
4482 crtc_state->hw.active = true;
4484 intel_crtc_readout_derived_state(crtc_state);
/*
 * VESA 640x480x72Hz mode to set on the pipe during load detection;
 * used as a known-good fixed mode while probing for a connected
 * display.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
4495 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4496 struct drm_crtc *crtc)
4498 struct drm_plane *plane;
4499 struct drm_plane_state *plane_state;
4502 ret = drm_atomic_add_affected_planes(state, crtc);
4506 for_each_new_plane_in_state(state, plane, plane_state, i) {
4507 if (plane_state->crtc != crtc)
4510 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4514 drm_atomic_set_fb_for_plane(plane_state, NULL);
/*
 * Set up a temporary pipe driving @connector with the fixed
 * load_detect_mode so the connector can be probed, saving a duplicated
 * copy of the current state in @old->restore_state for
 * intel_release_load_detect_pipe() to commit later.
 *
 * Must be called with the mode_config connection_mutex held via @ctx.
 * Returns > 0 when a pipe was set up, 0 when none was needed/available,
 * or a negative error code (-EDEADLK must be retried by the caller).
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);
	old->restore_state = NULL;
	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */
	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);
		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		/* Make sure the crtc and connector are running */
	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		/* The encoder must be able to drive this crtc. */
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		/* Skip crtcs that are already in use. */
		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
		crtc = possible_crtc;
	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
	/* One state to apply the probe mode, one to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;
	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
	crtc_state->uapi.active = true;
	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
	/* Probe with all planes off; only the pipe itself matters. */
	ret = intel_modeset_disable_planes(state, &crtc->base);
	/* Duplicate the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
	ret = drm_atomic_commit(state);
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
	old->restore_state = restore_state;
	drm_atomic_state_put(state);
	/* let the connector get through one full cycle before testing */
	intel_crtc_wait_for_next_vblank(crtc);
	drm_atomic_state_put(state);
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	/* -EDEADLK means the caller must back off and retry. */
	if (ret == -EDEADLK)
4676 void intel_release_load_detect_pipe(struct drm_connector *connector,
4677 struct intel_load_detect_pipe *old,
4678 struct drm_modeset_acquire_ctx *ctx)
4680 struct intel_encoder *intel_encoder =
4681 intel_attached_encoder(to_intel_connector(connector));
4682 struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4683 struct drm_encoder *encoder = &intel_encoder->base;
4684 struct drm_atomic_state *state = old->restore_state;
4687 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4688 connector->base.id, connector->name,
4689 encoder->base.id, encoder->name);
4694 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4696 drm_dbg_kms(&i915->drm,
4697 "Couldn't release load detect pipe: %i\n", ret);
4698 drm_atomic_state_put(state);
4701 static int i9xx_pll_refclk(struct drm_device *dev,
4702 const struct intel_crtc_state *pipe_config)
4704 struct drm_i915_private *dev_priv = to_i915(dev);
4705 u32 dpll = pipe_config->dpll_hw_state.dpll;
4707 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4708 return dev_priv->vbt.lvds_ssc_freq;
4709 else if (HAS_PCH_SPLIT(dev_priv))
4711 else if (DISPLAY_VER(dev_priv) != 2)
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values saved in
 * pipe_config->dpll_hw_state back into M/N/P divider values and stores
 * the resulting clock in pipe_config->port_clock.
 */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	int refclk = i9xx_pll_refclk(dev, pipe_config);
	/* Pick FP0 or FP1 depending on the programmed rate select. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
		fp = pipe_config->dpll_hw_state.fp1;
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* PNV encodes N as a one-hot bit; ffs() recovers the value. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	if (DISPLAY_VER(dev_priv) != 2) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
		/* gen2 path below */
		enum pipe lvds_pipe;
		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			if (lvds & LVDS_CLKB_POWER_UP)
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			if (dpll & PLL_P2_DIVIDE_BY_4)
		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
4810 int intel_dotclock_calculate(int link_freq,
4811 const struct intel_link_m_n *m_n)
4814 * The calculation for the data clock is:
4815 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4816 * But we want to avoid losing precison if possible, so:
4817 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4819 * and the link clock is simpler:
4820 * link_clock = (m * link_clock) / n
4826 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
/*
 * Returns the currently programmed mode of the given encoder.
 *
 * Reads back the hardware state of the pipe the encoder is driving and
 * converts the adjusted mode timings into a freshly allocated
 * drm_display_mode.  Presumably the caller owns (and must kfree) the
 * returned mode — TODO confirm against callers.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	/* Bail out if the encoder is not actually enabled. */
	if (!encoder->get_hw_state(encoder, &pipe))
	crtc = intel_crtc_for_pipe(dev_priv, pipe);
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	crtc_state = intel_crtc_state_alloc(crtc);
	if (!intel_crtc_get_pipe_config(crtc_state)) {
	intel_encoder_get_config(encoder, crtc_state);
	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4870 * intel_wm_need_update - Check whether watermarks need updating
4871 * @cur: current plane state
4872 * @new: new plane state
4874 * Check current plane state versus the new one to determine whether
4875 * watermarks need to be recalculated.
4877 * Returns true or false.
4879 static bool intel_wm_need_update(const struct intel_plane_state *cur,
4880 struct intel_plane_state *new)
4882 /* Update watermarks on tiling or size changes. */
4883 if (new->uapi.visible != cur->uapi.visible)
4886 if (!cur->hw.fb || !new->hw.fb)
4889 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
4890 cur->hw.rotation != new->hw.rotation ||
4891 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
4892 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
4893 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
4894 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
4900 static bool needs_scaling(const struct intel_plane_state *state)
4902 int src_w = drm_rect_width(&state->uapi.src) >> 16;
4903 int src_h = drm_rect_height(&state->uapi.src) >> 16;
4904 int dst_w = drm_rect_width(&state->uapi.dst);
4905 int dst_h = drm_rect_height(&state->uapi.dst);
4907 return (src_w != dst_w || src_h != dst_h);
4910 static bool intel_plane_do_async_flip(struct intel_plane *plane,
4911 const struct intel_crtc_state *old_crtc_state,
4912 const struct intel_crtc_state *new_crtc_state)
4914 struct drm_i915_private *i915 = to_i915(plane->base.dev);
4916 if (!plane->async_flip)
4919 if (!new_crtc_state->uapi.async_flip)
4923 * In platforms after DISPLAY13, we might need to override
4924 * first async flip in order to change watermark levels
4925 * as part of optimization.
4926 * So for those, we are checking if this is a first async flip.
4927 * For platforms earlier than DISPLAY13 we always do async flip.
4929 return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
/*
 * Compute the derived per-crtc flags caused by a single plane update:
 * watermark update phases (pre/post), CxSR disable, frontbuffer bits,
 * LP watermark workaround and async-flip eligibility.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *new_plane_state)
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = new_crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	/* SKL+ non-cursor planes may need a pipe scaler. */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
	was_visible = old_plane_state->uapi.visible;
	visible = new_plane_state->uapi.visible;
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;
	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(new_crtc_state, new_plane_state);
	if (!was_visible && !visible)
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);
	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		new_crtc_state->update_wm_pre = true;
	/* must disable cxsr around plane enable/disable */
	if (plane->id != PLANE_CURSOR)
		new_crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_post = true;
		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			new_crtc_state->update_wm_pre = true;
			new_crtc_state->update_wm_post = true;
	if (visible || was_visible)
		new_crtc_state->fb_bits |= plane->frontbuffer_bit;
	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(new_plane_state))))
		new_crtc_state->disable_lp_wm = true;
	if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
		new_plane_state->do_async_flip = true;
5057 static bool encoders_cloneable(const struct intel_encoder *a,
5058 const struct intel_encoder *b)
5060 /* masks could be asymmetric, so check both ways */
5061 return a == b || (a->cloneable & (1 << b->type) &&
5062 b->cloneable & (1 << a->type));
5065 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
5066 struct intel_crtc *crtc,
5067 struct intel_encoder *encoder)
5069 struct intel_encoder *source_encoder;
5070 struct drm_connector *connector;
5071 struct drm_connector_state *connector_state;
5074 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5075 if (connector_state->crtc != &crtc->base)
5079 to_intel_encoder(connector_state->best_encoder);
5080 if (!encoders_cloneable(encoder, source_encoder))
5087 static int icl_add_linked_planes(struct intel_atomic_state *state)
5089 struct intel_plane *plane, *linked;
5090 struct intel_plane_state *plane_state, *linked_plane_state;
5093 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5094 linked = plane_state->planar_linked_plane;
5099 linked_plane_state = intel_atomic_get_plane_state(state, linked);
5100 if (IS_ERR(linked_plane_state))
5101 return PTR_ERR(linked_plane_state);
5103 drm_WARN_ON(state->base.dev,
5104 linked_plane_state->planar_linked_plane != plane);
5105 drm_WARN_ON(state->base.dev,
5106 linked_plane_state->planar_slave == plane_state->planar_slave);
/*
 * (Re)build the Y-plane links needed for planar (NV12-style) formats on
 * ICL+: tear down all stale links, then pair every plane that needs a
 * Y plane with a free one and copy the relevant parameters over to the
 * slave.  Returns a negative error code when state acquisition fails
 * or no free Y plane is available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	/* Plane linking only exists on ICL and later. */
	if (DISPLAY_VER(dev_priv) < 11)
	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		plane_state->planar_slave = false;
	/* Nothing on this crtc needs a Y plane. */
	if (!crtc_state->nv12_planes)
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;
		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
		/* Find a currently unused plane that can act as Y plane. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
			if (crtc_state->active_planes & BIT(linked->id))
			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);
		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));
		plane_state->planar_linked_plane = linked;
		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);
		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;
		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;
		/* HDR planes select their Y plane via PLANE_CUS_CTL. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
				MISSING_CASE(linked->id);
5211 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
5213 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5214 struct intel_atomic_state *state =
5215 to_intel_atomic_state(new_crtc_state->uapi.state);
5216 const struct intel_crtc_state *old_crtc_state =
5217 intel_atomic_get_old_crtc_state(state, crtc);
5219 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
5222 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
5224 const struct drm_display_mode *pipe_mode =
5225 &crtc_state->hw.pipe_mode;
5228 if (!crtc_state->hw.enable)
5231 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5232 pipe_mode->crtc_clock);
5234 return min(linetime_wm, 0x1ff);
5237 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
5238 const struct intel_cdclk_state *cdclk_state)
5240 const struct drm_display_mode *pipe_mode =
5241 &crtc_state->hw.pipe_mode;
5244 if (!crtc_state->hw.enable)
5247 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5248 cdclk_state->logical.cdclk);
5250 return min(linetime_wm, 0x1ff);
5253 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
5255 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5256 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5257 const struct drm_display_mode *pipe_mode =
5258 &crtc_state->hw.pipe_mode;
5261 if (!crtc_state->hw.enable)
5264 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
5265 crtc_state->pixel_rate);
5267 /* Display WA #1135: BXT:ALL GLK:ALL */
5268 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
5269 dev_priv->ipc_enabled)
5272 return min(linetime_wm, 0x1ff);
5275 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
5276 struct intel_crtc *crtc)
5278 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5279 struct intel_crtc_state *crtc_state =
5280 intel_atomic_get_new_crtc_state(state, crtc);
5281 const struct intel_cdclk_state *cdclk_state;
5283 if (DISPLAY_VER(dev_priv) >= 9)
5284 crtc_state->linetime = skl_linetime_wm(crtc_state);
5286 crtc_state->linetime = hsw_linetime_wm(crtc_state);
5288 if (!hsw_crtc_supports_ips(crtc))
5291 cdclk_state = intel_atomic_get_cdclk_state(state);
5292 if (IS_ERR(cdclk_state))
5293 return PTR_ERR(cdclk_state);
5295 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
/*
 * Per-crtc atomic check: compute clocks, color management, watermarks,
 * scalers, IPS, linetime and PSR2 selective fetch state for the new
 * crtc state.  Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;
	if (mode_changed && crtc_state->hw.enable &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;
	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
	ret = intel_compute_pipe_wm(state, crtc);
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state. We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
	ret = intel_psr2_sel_fetch_update(state, crtc);
/*
 * Synchronize each connector's atomic state with the current
 * encoder/crtc routing (used after hardware state readout so the
 * software state matches reality), adjusting the connector reference
 * counts to match.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);
		/* Drop the reference held for the previous crtc binding. */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);
			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* Derive max_bpc from the pipe bpp (default 24). */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
			drm_connector_get(&connector->base);
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
	drm_connector_list_iter_end(&conn_iter);
/*
 * Clamp pipe_config->pipe_bpp to what this connector can take, based
 * on the connector state's max_bpc.  Logs when the pipe bpp has to be
 * lowered.  Returns 0 on success, negative on an unexpected max_bpc.
 */
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *pipe_config)
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;
	/* Map max_bpc onto a supported pipe bpp. */
	switch (conn_state->max_bpc) {
	default:
		MISSING_CASE(conn_state->max_bpc);
	/* Only ever lower the pipe bpp, never raise it. */
	if (bpp < pipe_config->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
			    connector->base.id, connector->name,
			    3 * conn_state->max_requested_bpc,
			    pipe_config->pipe_bpp);
		pipe_config->pipe_bpp = bpp;
/*
 * Establish the platform's maximum pipe bpp as the starting point, then
 * clamp it against every connector attached to @crtc in this state via
 * compute_sink_pipe_bpp().
 */
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	/* Platform-dependent maximum pipe bpp. */
	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
	else if (DISPLAY_VER(dev_priv) >= 5)
	pipe_config->pipe_bpp = bpp;
	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
5497 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
5498 const struct drm_display_mode *mode)
5500 drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
5501 "type: 0x%x flags: 0x%x\n",
5503 mode->crtc_hdisplay, mode->crtc_hsync_start,
5504 mode->crtc_hsync_end, mode->crtc_htotal,
5505 mode->crtc_vdisplay, mode->crtc_vsync_start,
5506 mode->crtc_vsync_end, mode->crtc_vtotal,
5507 mode->type, mode->flags);
5511 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
5512 const char *id, unsigned int lane_count,
5513 const struct intel_link_m_n *m_n)
5515 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5517 drm_dbg_kms(&i915->drm,
5518 "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
5520 m_n->gmch_m, m_n->gmch_n,
5521 m_n->link_m, m_n->link_n, m_n->tu);
5525 intel_dump_infoframe(struct drm_i915_private *dev_priv,
5526 const union hdmi_infoframe *frame)
5528 if (!drm_debug_enabled(DRM_UT_KMS))
5531 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
5535 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
5536 const struct drm_dp_vsc_sdp *vsc)
5538 if (!drm_debug_enabled(DRM_UT_KMS))
5541 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
/* Map an INTEL_OUTPUT_* enumerator to its name string, by index. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for output types, used by snprintf_output_types(). */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DP_MST),
/*
 * Format the set bits of @output_types into @buf as a comma-separated
 * list of output type names (e.g. "DP,HDMI"), bounded by @len.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		/* Skip types not present in the mask. */
		if ((output_types & BIT(i)) == 0)
		/* Prefix with a comma for all but the first entry. */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* Clear handled bits so leftovers can be detected below. */
		output_types &= ~BIT(i);
	/* Any bit still set has no name in output_type_str[]. */
	WARN_ON_ONCE(output_types != 0);
5590 static const char * const output_format_str[] = {
5591 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
5592 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
5593 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
5596 static const char *output_formats(enum intel_output_format format)
5598 if (format >= ARRAY_SIZE(output_format_str))
5600 return output_format_str[format];
/*
 * Dump a plane's state (fb, format, rotation, scaler, src/dst
 * rectangles) to the KMS debug log; planes without an fb get a short
 * [NOFB] line instead.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
		/* No fb attached: short form only. */
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src is 16.16 fixed point, dst is integer pixels. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
/*
 * intel_dump_pipe_config - dump a crtc's computed configuration to the
 * KMS debug log.
 * @pipe_config: crtc state to dump
 * @state: atomic state the crtc belongs to (scanned for this pipe's planes)
 * @context: free-form string identifying why the dump happens
 *
 * Pure debug output; does not modify any state.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* nothing more to report for a disabled crtc */
	if (!pipe_config->hw.enable)

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
		    enableddisabled(pipe_config->splitter.enable),
		    pipe_config->splitter.link_count,
		    pipe_config->splitter.pixel_overlap);

	/* FDI M/N values only exist when routing through the PCH */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				      pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second set of link params, used with DRRS */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* dump each infoframe/SDP type that is flagged enabled */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE: infoframes.drm is dumped for GAMUT_METADATA as well; the
	 * gamut metadata packet appears to carry the same HDR (DRM) payload
	 * here — intentional, not a copy/paste slip.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
		    pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	/* scaler state only exists on display version 9+ */
	if (DISPLAY_VER(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the panel fitter on the pipe, not the PCH */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);

	drm_dbg_kms(&dev_priv->drm,
		    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
		    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
		    enableddisabled(pipe_config->pch_pfit.enabled),
		    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* cgm_mode is CHV-specific, hence the separate format string */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm,
		    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
		    pipe_config->csc_mode, pipe_config->gamma_mode,
		    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

	/* finally dump the state of every plane on this crtc's pipe */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
/*
 * check_digital_port_conflicts - detect illegal digital port sharing.
 *
 * Builds two bitmasks over the connectors in @state (falling back to each
 * connector's current state): ports used by SST/HDMI encoders and ports
 * used by MST encoders. A port appearing twice in the SST mask, or in both
 * masks, is a conflict.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* prefer the state in @state, fall back to the current state */
		drm_atomic_get_new_connector_state(&state->base,
		if (!connector_state)
			connector_state = connector->state;

		/* connector not routed anywhere: nothing to account for */
		if (!connector_state->best_encoder)

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			/* DDI encoder type only makes sense on DDI platforms */
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
			/* DDI handled like the SST cases below */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))

			used_ports |= BIT(encoder->port);
		case INTEL_OUTPUT_DP_MST:

	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
/*
 * intel_crtc_copy_uapi_to_hw_state_nomodeset - refresh the parts of the hw
 * state that are allowed to change without a full modeset (the color
 * management blobs), taken from the (possibly bigjoiner master) state.
 */
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc_state *crtc_state)
	const struct intel_crtc_state *master_crtc_state;
	struct intel_crtc *master_crtc;

	master_crtc = intel_master_crtc(crtc_state);
	master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);

	/* No need to copy state if the master state is unchanged */
	if (master_crtc_state)
		intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
/*
 * intel_crtc_copy_uapi_to_hw_state - seed the hw crtc state from the
 * userspace-visible (uapi) state. The hw state is what the driver actually
 * programs; for a regular crtc it mirrors the uapi state directly.
 */
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	/* color blobs are handled by the nomodeset helper */
	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
/*
 * intel_crtc_copy_hw_to_uapi_state - reflect the hw crtc state back into the
 * uapi state so userspace reads back what was actually programmed.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
	/* bigjoiner slaves do not expose uapi state of their own */
	if (crtc_state->bigjoiner_slave)

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* copying back an already-validated mode is not expected to fail */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
/*
 * copy_bigjoiner_crtc_state - initialize a bigjoiner slave crtc state as a
 * copy of the master's (@from_crtc_state), while keeping the slave's own
 * uapi/dpll/scaler/crc bookkeeping, then fix up the fields that must differ
 * on the slave.
 */
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
	struct intel_crtc_state *saved_state;

	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);

	/* preserve the slave's own identity/bookkeeping in the copy */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* release the old hw state before it is overwritten wholesale */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* slave-specific fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
	crtc_state->has_audio = from_crtc_state->has_audio;
/*
 * intel_crtc_prepare_cleared_state - reset @crtc_state to a freshly
 * allocated state, carrying over only the fields that must survive a
 * recompute: uapi state, scaler and dpll assignments, crc enable, and on
 * G4X/VLV/CHV the watermark state.
 */
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* keep the watermark state on the platforms that track it here */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));

	/* re-seed the hw state from the preserved uapi state */
	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
/*
 * intel_modeset_pipe_config - compute a crtc's pipe configuration from the
 * requested mode and the encoders attached in @state.
 *
 * Runs two passes over the connectors bound to this crtc: first to settle
 * output_types (which the .compute_config() hooks rely on), then to let
 * each encoder adjust or reject the mode. The crtc-level computation may
 * request a retry with -EAGAIN (e.g. bandwidth constraints).
 */
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;

	/* default transcoder is the one matching the pipe */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* remember the baseline bpp so it can be reported against later */
	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	/* first pass: collect output_types and validate cloning */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		/* only connectors assigned to this crtc matter */
		if (connector_state->crtc != crtc)

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
			pipe_config->output_types |= BIT(encoder->type);

	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)

		ret = encoder->compute_config(encoder, pipe_config,
		/* -EDEADLK must be propagated untouched for lock backoff */
		if (ret == -EDEADLK)

		drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
	/* -EAGAIN: crtc asked for a retry of the encoder pass */
	if (ret == -EAGAIN) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");

	drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/*
 * intel_modeset_pipe_config_late - give each encoder on this crtc a final
 * chance to adjust the fully computed crtc state via its optional
 * .compute_config_late() hook.
 */
intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;

	for_each_new_connector_in_state(&state->base, connector,
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		/* skip connectors on other crtcs and encoders without the hook */
		if (conn_state->crtc != &crtc->base ||
		    !encoder->compute_config_late)

		ret = encoder->compute_config_late(encoder, crtc_state,
/*
 * intel_fuzzy_clock_check - compare two clock values with ~5% tolerance.
 * @clock1: first clock value
 * @clock2: second clock value
 *
 * Returns true when the clocks are identical, or when both are non-zero
 * and their difference is below roughly 5% of their average. A zero clock
 * only ever matches another zero clock.
 *
 * (The block as extracted had lost its braces and return statements; this
 * restores the complete implementation.)
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	/* zero means "no clock"; it cannot fuzzily match a real clock */
	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	/*
	 * (diff + avg) / avg < 1.05  <=>  diff < 5% of the average;
	 * computed on the sum in integer arithmetic to avoid rounding.
	 */
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}
6180 intel_compare_m_n(unsigned int m, unsigned int n,
6181 unsigned int m2, unsigned int n2,
6184 if (m == m2 && n == n2)
6187 if (exact || !m || !n || !m2 || !n2)
6190 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
6197 } else if (n < n2) {
6207 return intel_fuzzy_clock_check(m, m2);
6211 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
6212 const struct intel_link_m_n *m2_n2,
6215 return m_n->tu == m2_n2->tu &&
6216 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
6217 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
6218 intel_compare_m_n(m_n->link_m, m_n->link_n,
6219 m2_n2->link_m, m2_n2->link_n, exact);
6223 intel_compare_infoframe(const union hdmi_infoframe *a,
6224 const union hdmi_infoframe *b)
6226 return memcmp(a, b, sizeof(*a)) == 0;
6230 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
6231 const struct drm_dp_vsc_sdp *b)
6233 return memcmp(a, b, sizeof(*a)) == 0;
/*
 * pipe_config_infoframe_mismatch - report a mismatch between two crtc
 * states' infoframes. Fastset mismatches are expected and only logged at
 * debug level; a mismatch during full state verification is an error.
 */
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
	/* debug path: skip the multi-line dump unless KMS debug is on */
	if (!drm_debug_enabled(DRM_UT_KMS))

	drm_dbg_kms(&dev_priv->drm,
		    "fastset mismatch in %s infoframe\n", name);
	drm_dbg_kms(&dev_priv->drm, "expected:\n");
	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
	drm_dbg_kms(&dev_priv->drm, "found:\n");
	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);

	/* error path: always report, at error severity */
	drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
	drm_err(&dev_priv->drm, "expected:\n");
	hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
	drm_err(&dev_priv->drm, "found:\n");
	hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * pipe_config_dp_vsc_sdp_mismatch - report a mismatch between two crtc
 * states' DP VSC SDPs; debug level for fastset checks, error level for
 * full state verification.
 */
pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
				bool fastset, const char *name,
				const struct drm_dp_vsc_sdp *a,
				const struct drm_dp_vsc_sdp *b)
	/* debug path: skip the multi-line dump unless KMS debug is on */
	if (!drm_debug_enabled(DRM_UT_KMS))

	drm_dbg_kms(&dev_priv->drm,
		    "fastset mismatch in %s dp sdp\n", name);
	drm_dbg_kms(&dev_priv->drm, "expected:\n");
	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
	drm_dbg_kms(&dev_priv->drm, "found:\n");
	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);

	/* error path: always report, at error severity */
	drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
	drm_err(&dev_priv->drm, "expected:\n");
	drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
	drm_err(&dev_priv->drm, "found:\n");
	drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * pipe_config_mismatch - printf-style reporter for a single mismatching
 * crtc state field. Debug level for fastset checks, error level for full
 * state verification. The __printf attribute enables format checking of
 * the varargs.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;

	va_start(args, format);

	/* %pV expands the caller-supplied format/args in one go */
	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
		    crtc->base.base.id, crtc->base.name, name, &vaf);

	drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
		crtc->base.base.id, crtc->base.name, name, &vaf);
6309 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
6311 if (dev_priv->params.fastboot != -1)
6312 return dev_priv->params.fastboot;
6314 /* Enable fastboot by default on Skylake and newer */
6315 if (DISPLAY_VER(dev_priv) >= 9)
6318 /* Enable fastboot by default on VLV and CHV */
6319 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6322 /* Disabled by default on all others */
6327 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
6328 const struct intel_crtc_state *pipe_config,
6331 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
6332 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
6335 bool fixup_inherited = fastset &&
6336 current_config->inherited && !pipe_config->inherited;
6338 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
6339 drm_dbg_kms(&dev_priv->drm,
6340 "initial modeset and fastboot not set\n");
6344 #define PIPE_CONF_CHECK_X(name) do { \
6345 if (current_config->name != pipe_config->name) { \
6346 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6347 "(expected 0x%08x, found 0x%08x)", \
6348 current_config->name, \
6349 pipe_config->name); \
6354 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
6355 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
6356 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6357 "(expected 0x%08x, found 0x%08x)", \
6358 current_config->name & (mask), \
6359 pipe_config->name & (mask)); \
6364 #define PIPE_CONF_CHECK_I(name) do { \
6365 if (current_config->name != pipe_config->name) { \
6366 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6367 "(expected %i, found %i)", \
6368 current_config->name, \
6369 pipe_config->name); \
6374 #define PIPE_CONF_CHECK_BOOL(name) do { \
6375 if (current_config->name != pipe_config->name) { \
6376 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6377 "(expected %s, found %s)", \
6378 yesno(current_config->name), \
6379 yesno(pipe_config->name)); \
6385 * Checks state where we only read out the enabling, but not the entire
6386 * state itself (like full infoframes or ELD for audio). These states
6387 * require a full modeset on bootup to fix up.
6389 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6390 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6391 PIPE_CONF_CHECK_BOOL(name); \
6393 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6394 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6395 yesno(current_config->name), \
6396 yesno(pipe_config->name)); \
6401 #define PIPE_CONF_CHECK_P(name) do { \
6402 if (current_config->name != pipe_config->name) { \
6403 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6404 "(expected %p, found %p)", \
6405 current_config->name, \
6406 pipe_config->name); \
6411 #define PIPE_CONF_CHECK_M_N(name) do { \
6412 if (!intel_compare_link_m_n(¤t_config->name, \
6413 &pipe_config->name,\
6415 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6416 "(expected tu %i gmch %i/%i link %i/%i, " \
6417 "found tu %i, gmch %i/%i link %i/%i)", \
6418 current_config->name.tu, \
6419 current_config->name.gmch_m, \
6420 current_config->name.gmch_n, \
6421 current_config->name.link_m, \
6422 current_config->name.link_n, \
6423 pipe_config->name.tu, \
6424 pipe_config->name.gmch_m, \
6425 pipe_config->name.gmch_n, \
6426 pipe_config->name.link_m, \
6427 pipe_config->name.link_n); \
6432 /* This is required for BDW+ where there is only one set of registers for
6433 * switching between high and low RR.
6434 * This macro can be used whenever a comparison has to be made between one
6435 * hw state and multiple sw state variables.
6437 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6438 if (!intel_compare_link_m_n(¤t_config->name, \
6439 &pipe_config->name, !fastset) && \
6440 !intel_compare_link_m_n(¤t_config->alt_name, \
6441 &pipe_config->name, !fastset)) { \
6442 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6443 "(expected tu %i gmch %i/%i link %i/%i, " \
6444 "or tu %i gmch %i/%i link %i/%i, " \
6445 "found tu %i, gmch %i/%i link %i/%i)", \
6446 current_config->name.tu, \
6447 current_config->name.gmch_m, \
6448 current_config->name.gmch_n, \
6449 current_config->name.link_m, \
6450 current_config->name.link_n, \
6451 current_config->alt_name.tu, \
6452 current_config->alt_name.gmch_m, \
6453 current_config->alt_name.gmch_n, \
6454 current_config->alt_name.link_m, \
6455 current_config->alt_name.link_n, \
6456 pipe_config->name.tu, \
6457 pipe_config->name.gmch_m, \
6458 pipe_config->name.gmch_n, \
6459 pipe_config->name.link_m, \
6460 pipe_config->name.link_n); \
6465 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6466 if ((current_config->name ^ pipe_config->name) & (mask)) { \
6467 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6468 "(%x) (expected %i, found %i)", \
6470 current_config->name & (mask), \
6471 pipe_config->name & (mask)); \
6476 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6477 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6478 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6479 "(expected %i, found %i)", \
6480 current_config->name, \
6481 pipe_config->name); \
6486 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6487 if (!intel_compare_infoframe(¤t_config->infoframes.name, \
6488 &pipe_config->infoframes.name)) { \
6489 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6490 ¤t_config->infoframes.name, \
6491 &pipe_config->infoframes.name); \
6496 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
6497 if (!current_config->has_psr && !pipe_config->has_psr && \
6498 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \
6499 &pipe_config->infoframes.name)) { \
6500 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
6501 ¤t_config->infoframes.name, \
6502 &pipe_config->infoframes.name); \
6507 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
6508 if (current_config->name1 != pipe_config->name1) { \
6509 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
6510 "(expected %i, found %i, won't compare lut values)", \
6511 current_config->name1, \
6512 pipe_config->name1); \
6515 if (!intel_color_lut_equal(current_config->name2, \
6516 pipe_config->name2, pipe_config->name1, \
6518 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
6519 "hw_state doesn't match sw_state"); \
6525 #define PIPE_CONF_QUIRK(quirk) \
6526 ((current_config->quirks | pipe_config->quirks) & (quirk))
6528 PIPE_CONF_CHECK_I(cpu_transcoder);
6530 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6531 PIPE_CONF_CHECK_I(fdi_lanes);
6532 PIPE_CONF_CHECK_M_N(fdi_m_n);
6534 PIPE_CONF_CHECK_I(lane_count);
6535 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
6537 if (DISPLAY_VER(dev_priv) < 8) {
6538 PIPE_CONF_CHECK_M_N(dp_m_n);
6540 if (current_config->has_drrs)
6541 PIPE_CONF_CHECK_M_N(dp_m2_n2);
6543 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
6545 PIPE_CONF_CHECK_X(output_types);
6547 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
6548 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
6549 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
6550 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
6551 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
6552 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
6554 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
6555 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
6556 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
6557 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
6558 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
6559 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
6561 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
6562 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
6563 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
6564 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
6565 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
6566 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
6568 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
6569 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
6570 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
6571 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
6572 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
6573 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
6575 PIPE_CONF_CHECK_I(pixel_multiplier);
6577 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6578 DRM_MODE_FLAG_INTERLACE);
6580 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6581 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6582 DRM_MODE_FLAG_PHSYNC);
6583 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6584 DRM_MODE_FLAG_NHSYNC);
6585 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6586 DRM_MODE_FLAG_PVSYNC);
6587 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6588 DRM_MODE_FLAG_NVSYNC);
6591 PIPE_CONF_CHECK_I(output_format);
6592 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
6593 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
6594 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6595 PIPE_CONF_CHECK_BOOL(limited_color_range);
6597 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
6598 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
6599 PIPE_CONF_CHECK_BOOL(has_infoframe);
6600 PIPE_CONF_CHECK_BOOL(fec_enable);
6602 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
6604 PIPE_CONF_CHECK_X(gmch_pfit.control);
6605 /* pfit ratios are autocomputed by the hw on gen4+ */
6606 if (DISPLAY_VER(dev_priv) < 4)
6607 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
6608 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
6611 * Changing the EDP transcoder input mux
6612 * (A_ONOFF vs. A_ON) requires a full modeset.
6614 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
6617 PIPE_CONF_CHECK_I(pipe_src_w);
6618 PIPE_CONF_CHECK_I(pipe_src_h);
6620 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
6621 if (current_config->pch_pfit.enabled) {
6622 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
6623 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
6624 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
6625 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
6628 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
6629 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
6631 PIPE_CONF_CHECK_X(gamma_mode);
6632 if (IS_CHERRYVIEW(dev_priv))
6633 PIPE_CONF_CHECK_X(cgm_mode);
6635 PIPE_CONF_CHECK_X(csc_mode);
6636 PIPE_CONF_CHECK_BOOL(gamma_enable);
6637 PIPE_CONF_CHECK_BOOL(csc_enable);
6639 PIPE_CONF_CHECK_I(linetime);
6640 PIPE_CONF_CHECK_I(ips_linetime);
6642 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
6644 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
6646 if (current_config->active_planes) {
6647 PIPE_CONF_CHECK_BOOL(has_psr);
6648 PIPE_CONF_CHECK_BOOL(has_psr2);
6649 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
6650 PIPE_CONF_CHECK_I(dc3co_exitline);
6654 PIPE_CONF_CHECK_BOOL(double_wide);
6656 if (dev_priv->dpll.mgr) {
6657 PIPE_CONF_CHECK_P(shared_dpll);
6659 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
6660 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
6661 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
6662 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
6663 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6664 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
6665 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
6666 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
6667 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
6668 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
6669 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
6670 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
6671 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
6672 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
6673 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
6674 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
6675 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
6676 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
6677 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
6678 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
6679 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
6680 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
6681 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
6682 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
6683 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
6684 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
6685 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
6686 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
6687 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
6688 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
6689 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
6692 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
6693 PIPE_CONF_CHECK_X(dsi_pll.div);
6695 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
6696 PIPE_CONF_CHECK_I(pipe_bpp);
6698 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
6699 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
6700 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
6702 PIPE_CONF_CHECK_I(min_voltage_level);
6704 if (current_config->has_psr || pipe_config->has_psr)
6705 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
6706 ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
6708 PIPE_CONF_CHECK_X(infoframes.enable);
6710 PIPE_CONF_CHECK_X(infoframes.gcp);
6711 PIPE_CONF_CHECK_INFOFRAME(avi);
6712 PIPE_CONF_CHECK_INFOFRAME(spd);
6713 PIPE_CONF_CHECK_INFOFRAME(hdmi);
6714 PIPE_CONF_CHECK_INFOFRAME(drm);
6715 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
6717 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
6718 PIPE_CONF_CHECK_I(master_transcoder);
6719 PIPE_CONF_CHECK_BOOL(bigjoiner);
6720 PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
6721 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
6723 PIPE_CONF_CHECK_I(dsc.compression_enable);
6724 PIPE_CONF_CHECK_I(dsc.dsc_split);
6725 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
6727 PIPE_CONF_CHECK_BOOL(splitter.enable);
6728 PIPE_CONF_CHECK_I(splitter.link_count);
6729 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
6731 PIPE_CONF_CHECK_I(mst_master_transcoder);
6733 PIPE_CONF_CHECK_BOOL(vrr.enable);
6734 PIPE_CONF_CHECK_I(vrr.vmin);
6735 PIPE_CONF_CHECK_I(vrr.vmax);
6736 PIPE_CONF_CHECK_I(vrr.flipline);
6737 PIPE_CONF_CHECK_I(vrr.pipeline_full);
6738 PIPE_CONF_CHECK_I(vrr.guardband);
6740 #undef PIPE_CONF_CHECK_X
6741 #undef PIPE_CONF_CHECK_I
6742 #undef PIPE_CONF_CHECK_BOOL
6743 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
6744 #undef PIPE_CONF_CHECK_P
6745 #undef PIPE_CONF_CHECK_FLAGS
6746 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
6747 #undef PIPE_CONF_CHECK_COLOR_LUT
6748 #undef PIPE_CONF_QUIRK
6753 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
6754 const struct intel_crtc_state *pipe_config)
6756 if (pipe_config->has_pch_encoder) {
6757 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6758 &pipe_config->fdi_m_n);
6759 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
6762 * FDI already provided one idea for the dotclock.
6763 * Yell if the encoder disagrees.
6765 drm_WARN(&dev_priv->drm,
6766 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
6767 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6768 fdi_dotclock, dotclock);
/*
 * Compare the software-computed SKL+ watermark/DDB state for an active
 * pipe against what was actually programmed into the hardware, logging
 * a drm_err() for every mismatch found. Read-only apart from the log.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* snapshot of the WM/DDB state read back from the hardware */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	/* SKL-style watermarks only; nothing to verify on an inactive pipe */
	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	/* heap allocated because the snapshot is too big for the stack */
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* ICL+: the enabled DBUF slice mask must match sw tracking */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* transition watermark */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV WM0 — only on platforms with dedicated SAGV WM registers */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV transition watermark */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane->id];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
6893 verify_connector_state(struct intel_atomic_state *state,
6894 struct intel_crtc *crtc)
6896 struct drm_connector *connector;
6897 struct drm_connector_state *new_conn_state;
6900 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
6901 struct drm_encoder *encoder = connector->encoder;
6902 struct intel_crtc_state *crtc_state = NULL;
6904 if (new_conn_state->crtc != &crtc->base)
6908 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6910 intel_connector_verify_state(crtc_state, new_conn_state);
6912 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
6913 "connector's atomic encoder doesn't match legacy encoder\n");
/*
 * Cross-check every encoder's software state against the connector
 * states in @state and against the encoder's hardware enable bit.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* enabled: some new connector uses this encoder;
		 * found: some old or new connector references it at all */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* encoder not referenced by this commit — nothing to check */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* a detached encoder must also be off in the hardware */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
/*
 * Read the full pipe config back from the hardware into @old_crtc_state
 * (which is no longer needed and is recycled as scratch space) and
 * compare it against the software-computed @new_crtc_state.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master_crtc;

	/* reset the old state so it can hold the hw readout, keeping
	 * only its link back into the atomic state */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* for bigjoiner slaves the encoders hang off the master CRTC */
	master_crtc = intel_master_crtc(new_crtc_state);

	for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* an inactive pipe has nothing further worth comparing */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
7038 intel_verify_planes(struct intel_atomic_state *state)
7040 struct intel_plane *plane;
7041 const struct intel_plane_state *plane_state;
7044 for_each_new_intel_plane_in_state(state, plane,
7046 assert_plane(plane, plane_state->planar_slave ||
7047 plane_state->uapi.visible);
/*
 * Verify one shared DPLL's software tracking against the hardware.
 * When @crtc is NULL (the "disabled PLLs" pass) only the global
 * bookkeeping is checked; otherwise the per-pipe masks and the cached
 * hw state are verified as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* always-on PLLs have no meaningful on/off tracking to check */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* active pipes must be a subset of the reference holders */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* cached sw hw-state must match the registers while the pll is on */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
7106 verify_shared_dpll_state(struct intel_crtc *crtc,
7107 struct intel_crtc_state *old_crtc_state,
7108 struct intel_crtc_state *new_crtc_state)
7110 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7112 if (new_crtc_state->shared_dpll)
7113 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
7115 if (old_crtc_state->shared_dpll &&
7116 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
7117 u8 pipe_mask = BIT(crtc->pipe);
7118 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
7120 I915_STATE_WARN(pll->active_mask & pipe_mask,
7121 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
7122 pipe_name(crtc->pipe), pll->active_mask);
7123 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
7124 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
7125 pipe_name(crtc->pipe), pll->state.pipe_mask);
/*
 * Verify the SNPS PHY MPLLB software state against the hardware
 * readout for an active CRTC (DG2 only).
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
		   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_mpllb_state mpllb_hw_state = { 0 };
	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_encoder *encoder;

	/* only DG2 uses the SNPS PHY MPLLB */
	if (!IS_DG2(i915))
		return;

	if (!new_crtc_state->hw.active)
		return;

	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* compare one sw vs. hw MPLLB register field, logging any mismatch */
#define MPLLB_CHECK(name) do { \
	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     mpllb_sw_state->name, \
				     mpllb_hw_state.name); \
	} \
} while (0)

	MPLLB_CHECK(mpllb_cp);
	MPLLB_CHECK(mpllb_div);
	MPLLB_CHECK(mpllb_div2);
	MPLLB_CHECK(mpllb_fracn1);
	MPLLB_CHECK(mpllb_fracn2);
	MPLLB_CHECK(mpllb_sscen);
	MPLLB_CHECK(mpllb_sscstep);

	/*
	 * ref_control is handled by the hardware/firmware and never
	 * programmed by the software, but the proper values are supplied
	 * in the bspec for verification purposes.
	 */
	MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
7176 intel_modeset_verify_crtc(struct intel_crtc *crtc,
7177 struct intel_atomic_state *state,
7178 struct intel_crtc_state *old_crtc_state,
7179 struct intel_crtc_state *new_crtc_state)
7181 if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
7184 verify_wm_state(crtc, new_crtc_state);
7185 verify_connector_state(state, crtc);
7186 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
7187 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
7188 verify_mpllb_state(state, new_crtc_state);
7192 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
7196 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
7197 verify_single_dpll_state(dev_priv,
7198 &dev_priv->dpll.shared_dplls[i],
/*
 * Verify the parts of the state that are not tied to any enabled CRTC:
 * all encoders, the connectors with no CRTC, and the idle DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
/*
 * Pull every CRTC into @state and force a full modeset on all the
 * active ones (along with their connectors and planes).
 *
 * Returns 0 on success or a negative errno from state acquisition.
 */
int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* inactive pipes and pipes already doing a modeset are fine */
		if (!crtc_state->hw.active ||
		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		crtc_state->update_planes |= crtc_state->active_planes;
	}

	return 0;
}
/*
 * Refresh the CRTC's vblank timestamping constants and scanline offset
 * from the (possibly VRR-adjusted) committed timings.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* local copy so the VRR overrides don't touch the committed mode */
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (DISPLAY_VER(dev_priv) == 2) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
7311 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
7313 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7314 struct intel_crtc_state *new_crtc_state;
7315 struct intel_crtc *crtc;
7318 if (!dev_priv->dpll_funcs)
7321 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7322 if (!intel_crtc_needs_modeset(new_crtc_state))
7325 intel_release_shared_dplls(state, crtc);
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* second enabling CRTC found — that's all we need */
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/* record which pipe the 2-vblank wait must be performed against */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
/*
 * Fold this commit's CRTC enable/disable changes into @active_pipes
 * and return the resulting mask of active pipes.
 */
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
			   u8 active_pipes)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);
		else
			active_pipes &= ~BIT(crtc->pipe);
	}

	return active_pipes;
}
7407 static int intel_modeset_checks(struct intel_atomic_state *state)
7409 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7411 state->modeset = true;
7413 if (IS_HASWELL(dev_priv))
7414 return hsw_mode_set_planes_workaround(state);
7419 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
7420 struct intel_crtc_state *new_crtc_state)
7422 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
7425 new_crtc_state->uapi.mode_changed = false;
7426 new_crtc_state->update_pipe = true;
/*
 * Carry selected bits of the old state over into the new state when a
 * fastset (no full modeset) was chosen.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
7446 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
7447 struct intel_crtc *crtc,
7450 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7451 struct intel_plane *plane;
7453 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7454 struct intel_plane_state *plane_state;
7456 if ((plane_ids_mask & BIT(plane->id)) == 0)
7459 plane_state = intel_atomic_get_plane_state(state, plane);
7460 if (IS_ERR(plane_state))
7461 return PTR_ERR(plane_state);
7467 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
7468 struct intel_crtc *crtc)
7470 const struct intel_crtc_state *old_crtc_state =
7471 intel_atomic_get_old_crtc_state(state, crtc);
7472 const struct intel_crtc_state *new_crtc_state =
7473 intel_atomic_get_new_crtc_state(state, crtc);
7475 return intel_crtc_add_planes_to_state(state, crtc,
7476 old_crtc_state->enabled_planes |
7477 new_crtc_state->enabled_planes);
7480 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7482 /* See {hsw,vlv,ivb}_plane_ratio() */
7483 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7484 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7485 IS_IVYBRIDGE(dev_priv);
7488 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
7489 struct intel_crtc *crtc,
7490 struct intel_crtc *other)
7492 const struct intel_plane_state *plane_state;
7493 struct intel_plane *plane;
7497 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7498 if (plane->pipe == crtc->pipe)
7499 plane_ids |= BIT(plane->id);
7502 return intel_crtc_add_planes_to_state(state, other, plane_ids);
7505 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
7507 const struct intel_crtc_state *crtc_state;
7508 struct intel_crtc *crtc;
7511 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7514 if (!crtc_state->bigjoiner)
7517 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
7518 crtc_state->bigjoiner_linked_crtc);
/*
 * Run the plane-level atomic checks: linked-plane setup, bigjoiner
 * plane mirroring, per-plane checks, NV12 slave handling, and pulling
 * in extra planes where the active-plane count affects min cdclk.
 * Returns 0 or a negative errno.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* the cursor plane doesn't count for the plane ratios */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
7583 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
7585 struct intel_crtc_state *crtc_state;
7586 struct intel_crtc *crtc;
7589 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7590 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7593 ret = intel_crtc_atomic_check(state, crtc);
7595 drm_dbg_atomic(&i915->drm,
7596 "[CRTC:%d:%s] atomic driver check failed\n",
7597 crtc->base.base.id, crtc->base.name);
/*
 * Return true if any enabled CRTC whose CPU transcoder is in the
 * @transcoders bitmask is undergoing a full modeset in this commit.
 */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}
/*
 * Validate and set up the bigjoiner master/slave link for @crtc:
 * make sure an old slave is still claimed by its master, and claim the
 * adjacent pipe as slave when the new state wants bigjoiner.
 * Returns 0, -EINVAL on a claiming conflict, or another negative errno.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave_crtc, *master_crtc;

	/* slave being enabled, is master is still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave_crtc = crtc;
		master_crtc = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* the slave is always the next-numbered pipe */
	slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
	if (!slave_crtc) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
	master_crtc = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave_crtc->base.base.id, slave_crtc->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave_crtc->base.base.id, slave_crtc->base.name,
		      master_crtc->base.base.id, master_crtc->base.name);
	return -EINVAL;
}
/*
 * Tear down the bigjoiner link on both the master and its slave and
 * restore the slave's hw state from its own uapi state.
 */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc_state *master_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);

	/* sever the link symmetrically on both CRTC states */
	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
}
7686 * DOC: asynchronous flip implementation
7688 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7689 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7690 * Correspondingly, support is currently added for primary plane only.
7692 * Async flip can only change the plane surface address, so anything else
7693 * changing is rejected from the intel_atomic_check_async() function.
7694 * Once this check is cleared, flip done interrupt is enabled using
7695 * the intel_crtc_enable_flip_done() function.
7697 * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
7699 * handler itself. The timestamp and sequence sent during the flip done event
7700 * correspond to the last vblank and have no relation to the actual time when
7701 * the flip done event was sent.
7703 static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
7705 struct drm_i915_private *i915 = to_i915(state->base.dev);
7706 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7707 const struct intel_plane_state *new_plane_state, *old_plane_state;
7708 struct intel_plane *plane;
7711 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
7712 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
7714 if (intel_crtc_needs_modeset(new_crtc_state)) {
7715 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
7719 if (!new_crtc_state->hw.active) {
7720 drm_dbg_kms(&i915->drm, "CRTC inactive\n");
7723 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
7724 drm_dbg_kms(&i915->drm,
7725 "Active planes cannot be changed during async flip\n");
7729 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7730 new_plane_state, i) {
7731 if (plane->pipe != crtc->pipe)
7735 * TODO: Async flip is only supported through the page flip IOCTL
7736 * as of now. So support currently added for primary plane only.
7737 * Support for other planes on platforms on which supports
7738 * this(vlv/chv and icl+) should be added when async flip is
7739 * enabled in the atomic IOCTL path.
7741 if (!plane->async_flip)
7745 * FIXME: This check is kept generic for all platforms.
7746 * Need to verify this for all gen9 platforms to enable
7747 * this selectively if required.
7749 switch (new_plane_state->hw.fb->modifier) {
7750 case I915_FORMAT_MOD_X_TILED:
7751 case I915_FORMAT_MOD_Y_TILED:
7752 case I915_FORMAT_MOD_Yf_TILED:
7755 drm_dbg_kms(&i915->drm,
7756 "Linear memory/CCS does not support async flips\n");
7760 if (new_plane_state->hw.fb->format->num_planes > 1) {
7761 drm_dbg_kms(&i915->drm,
7762 "Planar formats not supported with async flips\n");
7766 if (old_plane_state->view.color_plane[0].mapping_stride !=
7767 new_plane_state->view.color_plane[0].mapping_stride) {
7768 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
7772 if (old_plane_state->hw.fb->modifier !=
7773 new_plane_state->hw.fb->modifier) {
7774 drm_dbg_kms(&i915->drm,
7775 "Framebuffer modifiers cannot be changed in async flip\n");
7779 if (old_plane_state->hw.fb->format !=
7780 new_plane_state->hw.fb->format) {
7781 drm_dbg_kms(&i915->drm,
7782 "Framebuffer format cannot be changed in async flip\n");
7786 if (old_plane_state->hw.rotation !=
7787 new_plane_state->hw.rotation) {
7788 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
7792 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
7793 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
7794 drm_dbg_kms(&i915->drm,
7795 "Plane size/co-ordinates cannot be changed in async flip\n");
7799 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
7800 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
7804 if (old_plane_state->hw.pixel_blend_mode !=
7805 new_plane_state->hw.pixel_blend_mode) {
7806 drm_dbg_kms(&i915->drm,
7807 "Pixel blend mode cannot be changed in async flip\n");
7811 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
7812 drm_dbg_kms(&i915->drm,
7813 "Color encoding cannot be changed in async flip\n");
7817 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
7818 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
7822 /* plane decryption is allow to change only in synchronous flips */
7823 if (old_plane_state->decrypt != new_plane_state->decrypt)
7830 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
7832 struct intel_crtc_state *crtc_state;
7833 struct intel_crtc *crtc;
7836 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7837 struct intel_crtc_state *linked_crtc_state;
7838 struct intel_crtc *linked_crtc;
7841 if (!crtc_state->bigjoiner)
7844 linked_crtc = crtc_state->bigjoiner_linked_crtc;
7845 linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
7846 if (IS_ERR(linked_crtc_state))
7847 return PTR_ERR(linked_crtc_state);
7849 if (!intel_crtc_needs_modeset(crtc_state))
7852 linked_crtc_state->uapi.mode_changed = true;
7854 ret = drm_atomic_add_affected_connectors(&state->base,
7855 &linked_crtc->base);
7859 ret = intel_atomic_add_affected_planes(state, linked_crtc);
7864 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7865 /* Kill old bigjoiner link, we may re-establish afterwards */
7866 if (intel_crtc_needs_modeset(crtc_state) &&
7867 crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
7868 kill_bigjoiner_slave(state, crtc_state);
7875 * intel_atomic_check - validate state object
 * @dev: drm device
7877 * @_state: state to validate
7879 static int intel_atomic_check(struct drm_device *dev,
7880 struct drm_atomic_state *_state)
7882 struct drm_i915_private *dev_priv = to_i915(dev);
7883 struct intel_atomic_state *state = to_intel_atomic_state(_state);
7884 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7885 struct intel_crtc *crtc;
7887 bool any_ms = false;
7889 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7890 new_crtc_state, i) {
7891 if (new_crtc_state->inherited != old_crtc_state->inherited)
7892 new_crtc_state->uapi.mode_changed = true;
7895 intel_vrr_check_modeset(state);
7897 ret = drm_atomic_helper_check_modeset(dev, &state->base);
7901 ret = intel_bigjoiner_add_affected_crtcs(state);
7905 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7906 new_crtc_state, i) {
7907 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7909 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
7914 if (!new_crtc_state->uapi.enable) {
7915 if (!new_crtc_state->bigjoiner_slave) {
7916 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
7922 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
7926 ret = intel_modeset_pipe_config(state, new_crtc_state);
7930 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
7936 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7937 new_crtc_state, i) {
7938 if (!intel_crtc_needs_modeset(new_crtc_state))
7941 ret = intel_modeset_pipe_config_late(new_crtc_state);
7945 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
7949 * Check if fastset is allowed by external dependencies like other
7950 * pipes and transcoders.
7952 * Right now it only forces a fullmodeset when the MST master
7953 * transcoder did not changed but the pipe of the master transcoder
7954 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
7955 * in case of port synced crtcs, if one of the synced crtcs
7956 * needs a full modeset, all other synced crtcs should be
7957 * forced a full modeset.
7959 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7960 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7963 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7964 enum transcoder master = new_crtc_state->mst_master_transcoder;
7966 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7967 new_crtc_state->uapi.mode_changed = true;
7968 new_crtc_state->update_pipe = false;
7972 if (is_trans_port_sync_mode(new_crtc_state)) {
7973 u8 trans = new_crtc_state->sync_mode_slaves_mask;
7975 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7976 trans |= BIT(new_crtc_state->master_transcoder);
7978 if (intel_cpu_transcoders_need_modeset(state, trans)) {
7979 new_crtc_state->uapi.mode_changed = true;
7980 new_crtc_state->update_pipe = false;
7984 if (new_crtc_state->bigjoiner) {
7985 struct intel_crtc_state *linked_crtc_state =
7986 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
7988 if (intel_crtc_needs_modeset(linked_crtc_state)) {
7989 new_crtc_state->uapi.mode_changed = true;
7990 new_crtc_state->update_pipe = false;
7995 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7996 new_crtc_state, i) {
7997 if (intel_crtc_needs_modeset(new_crtc_state)) {
8002 if (!new_crtc_state->update_pipe)
8005 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
8008 if (any_ms && !check_digital_port_conflicts(state)) {
8009 drm_dbg_kms(&dev_priv->drm,
8010 "rejecting conflicting digital port configuration\n");
8015 ret = drm_dp_mst_atomic_check(&state->base);
8019 ret = intel_atomic_check_planes(state);
8023 ret = intel_compute_global_watermarks(state);
8027 ret = intel_bw_atomic_check(state);
8031 ret = intel_cdclk_atomic_check(state, &any_ms);
8035 if (intel_any_crtc_needs_modeset(state))
8039 ret = intel_modeset_checks(state);
8043 ret = intel_modeset_calc_cdclk(state);
8047 intel_modeset_clear_plls(state);
8050 ret = intel_atomic_check_crtcs(state);
8054 ret = intel_fbc_atomic_check(state);
8058 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8059 new_crtc_state, i) {
8060 if (new_crtc_state->uapi.async_flip) {
8061 ret = intel_atomic_check_async(state, crtc);
8066 if (!intel_crtc_needs_modeset(new_crtc_state) &&
8067 !new_crtc_state->update_pipe)
8070 intel_dump_pipe_config(new_crtc_state, state,
8071 intel_crtc_needs_modeset(new_crtc_state) ?
8072 "[modeset]" : "[fastset]");
8078 if (ret == -EDEADLK)
8082 * FIXME would probably be nice to know which crtc specifically
8083 * caused the failure, in cases where we can pinpoint it.
8085 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8087 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
8092 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
8094 struct intel_crtc_state *crtc_state;
8095 struct intel_crtc *crtc;
8098 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
8102 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8103 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8105 if (mode_changed || crtc_state->update_pipe ||
8106 crtc_state->uapi.color_mgmt_changed) {
8107 intel_dsb_prepare(crtc_state);
8114 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
8115 struct intel_crtc_state *crtc_state)
8117 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8119 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
8120 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8122 if (crtc_state->has_pch_encoder) {
8123 enum pipe pch_transcoder =
8124 intel_crtc_pch_transcoder(crtc);
8126 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
8130 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
8131 const struct intel_crtc_state *new_crtc_state)
8133 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
8134 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8137 * Update pipe size and adjust fitter if needed: the reason for this is
8138 * that in compute_mode_changes we check the native mode (not the pfit
8139 * mode) to see if we can flip rather than do a full mode set. In the
8140 * fastboot case, we'll flip, but if we don't update the pipesrc and
8141 * pfit state, we'll end up with a big fb scanned out into the wrong
8144 intel_set_pipe_src_size(new_crtc_state);
8146 /* on skylake this is done by detaching scalers */
8147 if (DISPLAY_VER(dev_priv) >= 9) {
8148 if (new_crtc_state->pch_pfit.enabled)
8149 skl_pfit_enable(new_crtc_state);
8150 } else if (HAS_PCH_SPLIT(dev_priv)) {
8151 if (new_crtc_state->pch_pfit.enabled)
8152 ilk_pfit_enable(new_crtc_state);
8153 else if (old_crtc_state->pch_pfit.enabled)
8154 ilk_pfit_disable(old_crtc_state);
8158 * The register is supposedly single buffered so perhaps
8159 * not 100% correct to do this here. But SKL+ calculate
8160 * this based on the adjust pixel rate so pfit changes do
8161 * affect it and so it must be updated for fastsets.
8162 * HSW/BDW only really need this here for fastboot, after
8163 * that the value should not change without a full modeset.
8165 if (DISPLAY_VER(dev_priv) >= 9 ||
8166 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8167 hsw_set_linetime_wm(new_crtc_state);
8169 if (DISPLAY_VER(dev_priv) >= 11)
8170 icl_set_pipe_chicken(new_crtc_state);
8173 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
8174 struct intel_crtc *crtc)
8176 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8177 const struct intel_crtc_state *old_crtc_state =
8178 intel_atomic_get_old_crtc_state(state, crtc);
8179 const struct intel_crtc_state *new_crtc_state =
8180 intel_atomic_get_new_crtc_state(state, crtc);
8181 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8184 * During modesets pipe configuration was programmed as the
8188 if (new_crtc_state->uapi.color_mgmt_changed ||
8189 new_crtc_state->update_pipe)
8190 intel_color_commit(new_crtc_state);
8192 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
8193 bdw_set_pipemisc(new_crtc_state);
8195 if (new_crtc_state->update_pipe)
8196 intel_pipe_fastset(old_crtc_state, new_crtc_state);
8199 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
8201 intel_atomic_update_watermarks(state, crtc);
8204 static void commit_pipe_post_planes(struct intel_atomic_state *state,
8205 struct intel_crtc *crtc)
8207 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8208 const struct intel_crtc_state *new_crtc_state =
8209 intel_atomic_get_new_crtc_state(state, crtc);
8212 * Disable the scaler(s) after the plane(s) so that we don't
8213 * get a catastrophic underrun even if the two operations
8214 * end up happening in two different frames.
8216 if (DISPLAY_VER(dev_priv) >= 9 &&
8217 !intel_crtc_needs_modeset(new_crtc_state))
8218 skl_detach_scalers(new_crtc_state);
8221 static void intel_enable_crtc(struct intel_atomic_state *state,
8222 struct intel_crtc *crtc)
8224 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8225 const struct intel_crtc_state *new_crtc_state =
8226 intel_atomic_get_new_crtc_state(state, crtc);
8228 if (!intel_crtc_needs_modeset(new_crtc_state))
8231 intel_crtc_update_active_timings(new_crtc_state);
8233 dev_priv->display->crtc_enable(state, crtc);
8235 if (new_crtc_state->bigjoiner_slave)
8238 /* vblanks work again, re-enable pipe CRC. */
8239 intel_crtc_enable_pipe_crc(crtc);
8242 static void intel_update_crtc(struct intel_atomic_state *state,
8243 struct intel_crtc *crtc)
8245 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8246 const struct intel_crtc_state *old_crtc_state =
8247 intel_atomic_get_old_crtc_state(state, crtc);
8248 struct intel_crtc_state *new_crtc_state =
8249 intel_atomic_get_new_crtc_state(state, crtc);
8250 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8253 if (new_crtc_state->preload_luts &&
8254 (new_crtc_state->uapi.color_mgmt_changed ||
8255 new_crtc_state->update_pipe))
8256 intel_color_load_luts(new_crtc_state);
8258 intel_pre_plane_update(state, crtc);
8260 if (new_crtc_state->update_pipe)
8261 intel_encoders_update_pipe(state, crtc);
8264 intel_fbc_update(state, crtc);
8266 intel_update_planes_on_crtc(state, crtc);
8268 /* Perform vblank evasion around commit operation */
8269 intel_pipe_update_start(new_crtc_state);
8271 commit_pipe_pre_planes(state, crtc);
8273 if (DISPLAY_VER(dev_priv) >= 9)
8274 skl_arm_planes_on_crtc(state, crtc);
8276 i9xx_arm_planes_on_crtc(state, crtc);
8278 commit_pipe_post_planes(state, crtc);
8280 intel_pipe_update_end(new_crtc_state);
8283 * We usually enable FIFO underrun interrupts as part of the
8284 * CRTC enable sequence during modesets. But when we inherit a
8285 * valid pipe configuration from the BIOS we need to take care
8286 * of enabling them on the CRTC's first fastset.
8288 if (new_crtc_state->update_pipe && !modeset &&
8289 old_crtc_state->inherited)
8290 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
8293 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
8294 struct intel_crtc_state *old_crtc_state,
8295 struct intel_crtc_state *new_crtc_state,
8296 struct intel_crtc *crtc)
8298 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8301 * We need to disable pipe CRC before disabling the pipe,
8302 * or we race against vblank off.
8304 intel_crtc_disable_pipe_crc(crtc);
8306 dev_priv->display->crtc_disable(state, crtc);
8307 crtc->active = false;
8308 intel_fbc_disable(crtc);
8309 intel_disable_shared_dpll(old_crtc_state);
8311 /* FIXME unify this for all platforms */
8312 if (!new_crtc_state->hw.active &&
8313 !HAS_GMCH(dev_priv))
8314 intel_initial_watermarks(state, crtc);
8317 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
8319 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8320 struct intel_crtc *crtc;
8324 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8325 new_crtc_state, i) {
8326 if (!intel_crtc_needs_modeset(new_crtc_state))
8329 if (!old_crtc_state->hw.active)
8332 intel_pre_plane_update(state, crtc);
8333 intel_crtc_disable_planes(state, crtc);
8336 /* Only disable port sync and MST slaves */
8337 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8338 new_crtc_state, i) {
8339 if (!intel_crtc_needs_modeset(new_crtc_state))
8342 if (!old_crtc_state->hw.active)
8345 /* In case of Transcoder port Sync master slave CRTCs can be
8346 * assigned in any order and we need to make sure that
8347 * slave CRTCs are disabled first and then master CRTC since
8348 * Slave vblanks are masked till Master Vblanks.
8350 if (!is_trans_port_sync_slave(old_crtc_state) &&
8351 !intel_dp_mst_is_slave_trans(old_crtc_state) &&
8352 !old_crtc_state->bigjoiner_slave)
8355 intel_old_crtc_state_disables(state, old_crtc_state,
8356 new_crtc_state, crtc);
8357 handled |= BIT(crtc->pipe);
8360 /* Disable everything else left on */
8361 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8362 new_crtc_state, i) {
8363 if (!intel_crtc_needs_modeset(new_crtc_state) ||
8364 (handled & BIT(crtc->pipe)))
8367 if (!old_crtc_state->hw.active)
8370 intel_old_crtc_state_disables(state, old_crtc_state,
8371 new_crtc_state, crtc);
8375 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8377 struct intel_crtc_state *new_crtc_state;
8378 struct intel_crtc *crtc;
8381 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8382 if (!new_crtc_state->hw.active)
8385 intel_enable_crtc(state, crtc);
8386 intel_update_crtc(state, crtc);
8390 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
8392 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8393 struct intel_crtc *crtc;
8394 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8395 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
8396 u8 update_pipes = 0, modeset_pipes = 0;
8399 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8400 enum pipe pipe = crtc->pipe;
8402 if (!new_crtc_state->hw.active)
8405 /* ignore allocations for crtc's that have been turned off. */
8406 if (!intel_crtc_needs_modeset(new_crtc_state)) {
8407 entries[pipe] = old_crtc_state->wm.skl.ddb;
8408 update_pipes |= BIT(pipe);
8410 modeset_pipes |= BIT(pipe);
8415 * Whenever the number of active pipes changes, we need to make sure we
8416 * update the pipes in the right order so that their ddb allocations
8417 * never overlap with each other between CRTC updates. Otherwise we'll
8418 * cause pipe underruns and other bad stuff.
8420 * So first lets enable all pipes that do not need a fullmodeset as
8421 * those don't have any external dependency.
8423 while (update_pipes) {
8424 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8425 new_crtc_state, i) {
8426 enum pipe pipe = crtc->pipe;
8428 if ((update_pipes & BIT(pipe)) == 0)
8431 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8432 entries, I915_MAX_PIPES, pipe))
8435 entries[pipe] = new_crtc_state->wm.skl.ddb;
8436 update_pipes &= ~BIT(pipe);
8438 intel_update_crtc(state, crtc);
8441 * If this is an already active pipe, it's DDB changed,
8442 * and this isn't the last pipe that needs updating
8443 * then we need to wait for a vblank to pass for the
8444 * new ddb allocation to take effect.
8446 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
8447 &old_crtc_state->wm.skl.ddb) &&
8448 (update_pipes | modeset_pipes))
8449 intel_crtc_wait_for_next_vblank(crtc);
8453 update_pipes = modeset_pipes;
8456 * Enable all pipes that needs a modeset and do not depends on other
8459 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8460 enum pipe pipe = crtc->pipe;
8462 if ((modeset_pipes & BIT(pipe)) == 0)
8465 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
8466 is_trans_port_sync_master(new_crtc_state) ||
8467 (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
8470 modeset_pipes &= ~BIT(pipe);
8472 intel_enable_crtc(state, crtc);
8476 * Then we enable all remaining pipes that depend on other
8477 * pipes: MST slaves and port sync masters, big joiner master
8479 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8480 enum pipe pipe = crtc->pipe;
8482 if ((modeset_pipes & BIT(pipe)) == 0)
8485 modeset_pipes &= ~BIT(pipe);
8487 intel_enable_crtc(state, crtc);
8491 * Finally we do the plane updates/etc. for all pipes that got enabled.
8493 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8494 enum pipe pipe = crtc->pipe;
8496 if ((update_pipes & BIT(pipe)) == 0)
8499 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8500 entries, I915_MAX_PIPES, pipe));
8502 entries[pipe] = new_crtc_state->wm.skl.ddb;
8503 update_pipes &= ~BIT(pipe);
8505 intel_update_crtc(state, crtc);
8508 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
8509 drm_WARN_ON(&dev_priv->drm, update_pipes);
8512 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8514 struct intel_atomic_state *state, *next;
8515 struct llist_node *freed;
8517 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8518 llist_for_each_entry_safe(state, next, freed, freed)
8519 drm_atomic_state_put(&state->base);
8522 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8524 struct drm_i915_private *dev_priv =
8525 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8527 intel_atomic_helper_free_state(dev_priv);
8530 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
8532 struct wait_queue_entry wait_fence, wait_reset;
8533 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
8535 init_wait_entry(&wait_fence, 0);
8536 init_wait_entry(&wait_reset, 0);
8538 prepare_to_wait(&intel_state->commit_ready.wait,
8539 &wait_fence, TASK_UNINTERRUPTIBLE);
8540 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
8541 I915_RESET_MODESET),
8542 &wait_reset, TASK_UNINTERRUPTIBLE);
8545 if (i915_sw_fence_done(&intel_state->commit_ready) ||
8546 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
8551 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
8552 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
8553 I915_RESET_MODESET),
8557 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
8559 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8560 struct intel_crtc *crtc;
8563 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8565 intel_dsb_cleanup(old_crtc_state);
8568 static void intel_atomic_cleanup_work(struct work_struct *work)
8570 struct intel_atomic_state *state =
8571 container_of(work, struct intel_atomic_state, base.commit_work);
8572 struct drm_i915_private *i915 = to_i915(state->base.dev);
8574 intel_cleanup_dsbs(state);
8575 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
8576 drm_atomic_helper_commit_cleanup_done(&state->base);
8577 drm_atomic_state_put(&state->base);
8579 intel_atomic_helper_free_state(i915);
8582 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
8584 struct drm_i915_private *i915 = to_i915(state->base.dev);
8585 struct intel_plane *plane;
8586 struct intel_plane_state *plane_state;
8589 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8590 struct drm_framebuffer *fb = plane_state->hw.fb;
8597 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
8602 * The layout of the fast clear color value expected by HW
8603 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
8604 * - 4 x 4 bytes per-channel value
8605 * (in surface type specific float/int format provided by the fb user)
8606 * - 8 bytes native color value used by the display
8607 * (converted/written by GPU during a fast clear operation using the
8608 * above per-channel values)
8610 * The commit's FB prepare hook already ensured that FB obj is pinned and the
8611 * caller made sure that the object is synced wrt. the related color clear value
8614 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
8615 fb->offsets[cc_plane] + 16,
8616 &plane_state->ccval,
8617 sizeof(plane_state->ccval));
8618 /* The above could only fail if the FB obj has an unexpected backing store type. */
8619 drm_WARN_ON(&i915->drm, ret);
8623 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
8625 struct drm_device *dev = state->base.dev;
8626 struct drm_i915_private *dev_priv = to_i915(dev);
8627 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8628 struct intel_crtc *crtc;
8629 u64 put_domains[I915_MAX_PIPES] = {};
8630 intel_wakeref_t wakeref = 0;
8633 intel_atomic_commit_fence_wait(state);
8635 drm_atomic_helper_wait_for_dependencies(&state->base);
8638 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
8640 intel_atomic_prepare_plane_clear_colors(state);
8642 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8643 new_crtc_state, i) {
8644 if (intel_crtc_needs_modeset(new_crtc_state) ||
8645 new_crtc_state->update_pipe) {
8647 put_domains[crtc->pipe] =
8648 modeset_get_crtc_power_domains(new_crtc_state);
8652 intel_commit_modeset_disables(state);
8654 /* FIXME: Eventually get rid of our crtc->config pointer */
8655 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8656 crtc->config = new_crtc_state;
8658 if (state->modeset) {
8659 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
8661 intel_set_cdclk_pre_plane_update(state);
8663 intel_modeset_verify_disabled(dev_priv, state);
8666 intel_sagv_pre_plane_update(state);
8668 /* Complete the events for pipes that have now been disabled */
8669 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8670 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8672 /* Complete events for now disable pipes here. */
8673 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
8674 spin_lock_irq(&dev->event_lock);
8675 drm_crtc_send_vblank_event(&crtc->base,
8676 new_crtc_state->uapi.event);
8677 spin_unlock_irq(&dev->event_lock);
8679 new_crtc_state->uapi.event = NULL;
8683 intel_encoders_update_prepare(state);
8685 intel_dbuf_pre_plane_update(state);
8687 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8688 if (new_crtc_state->uapi.async_flip)
8689 intel_crtc_enable_flip_done(state, crtc);
8692 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
8693 dev_priv->display->commit_modeset_enables(state);
8695 intel_encoders_update_complete(state);
8698 intel_set_cdclk_post_plane_update(state);
8700 intel_wait_for_vblank_workers(state);
8702 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
8703 * already, but still need the state for the delayed optimization. To
8705 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
8706 * - schedule that vblank worker _before_ calling hw_done
8707 * - at the start of commit_tail, cancel it _synchrously
8708 * - switch over to the vblank wait helper in the core after that since
8709 * we don't need out special handling any more.
8711 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
8713 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8714 if (new_crtc_state->uapi.async_flip)
8715 intel_crtc_disable_flip_done(state, crtc);
8719 * Now that the vblank has passed, we can go ahead and program the
8720 * optimal watermarks on platforms that need two-step watermark
8723 * TODO: Move this (and other cleanup) to an async worker eventually.
8725 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8726 new_crtc_state, i) {
8728 * Gen2 reports pipe underruns whenever all planes are disabled.
8729 * So re-enable underrun reporting after some planes get enabled.
8731 * We do this before .optimize_watermarks() so that we have a
8732 * chance of catching underruns with the intermediate watermarks
8733 * vs. the new plane configuration.
8735 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
8736 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8738 intel_optimize_watermarks(state, crtc);
8741 intel_dbuf_post_plane_update(state);
8742 intel_psr_post_plane_update(state);
8744 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8745 intel_post_plane_update(state, crtc);
8747 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
8749 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
8752 * DSB cleanup is done in cleanup_work aligning with framebuffer
8753 * cleanup. So copy and reset the dsb structure to sync with
8754 * commit_done and later do dsb cleanup in cleanup_work.
8756 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
8759 /* Underruns don't always raise interrupts, so check manually */
8760 intel_check_cpu_fifo_underruns(dev_priv);
8761 intel_check_pch_fifo_underruns(dev_priv);
8764 intel_verify_planes(state);
8766 intel_sagv_post_plane_update(state);
8768 drm_atomic_helper_commit_hw_done(&state->base);
8770 if (state->modeset) {
8771 /* As one of the primary mmio accessors, KMS has a high
8772 * likelihood of triggering bugs in unclaimed access. After we
8773 * finish modesetting, see if an error has been flagged, and if
8774 * so enable debugging for the next modeset - and hope we catch
8777 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
8778 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
8780 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8783 * Defer the cleanup of the old state to a separate worker to not
8784 * impede the current task (userspace for blocking modesets) that
8785 * are executed inline. For out-of-line asynchronous modesets/flips,
8786 * deferring to a new worker seems overkill, but we would place a
8787 * schedule point (cond_resched()) here anyway to keep latencies
8790 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
8791 queue_work(system_highpri_wq, &state->base.commit_work);
8794 static void intel_atomic_commit_work(struct work_struct *work)
8796 struct intel_atomic_state *state =
8797 container_of(work, struct intel_atomic_state, base.commit_work);
8799 intel_atomic_commit_tail(state);
8802 static int __i915_sw_fence_call
8803 intel_atomic_commit_ready(struct i915_sw_fence *fence,
8804 enum i915_sw_fence_notify notify)
8806 struct intel_atomic_state *state =
8807 container_of(fence, struct intel_atomic_state, commit_ready);
8810 case FENCE_COMPLETE:
8811 /* we do blocking waits in the worker, nothing to do here */
8815 struct intel_atomic_helper *helper =
8816 &to_i915(state->base.dev)->atomic_helper;
8818 if (llist_add(&state->freed, &helper->free_list))
8819 schedule_work(&helper->free_work);
8827 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
8829 struct intel_plane_state *old_plane_state, *new_plane_state;
8830 struct intel_plane *plane;
8833 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
8835 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
8836 to_intel_frontbuffer(new_plane_state->hw.fb),
8837 plane->frontbuffer_bit);
/*
 * Driver-level atomic commit entry point (drm_mode_config_funcs.atomic_commit).
 * Takes a runtime PM wakeref for the duration of the commit, prepares the
 * commit, swaps in the new state, and then either queues the commit tail on
 * the modeset/flip workqueue (nonblocking) or runs it inline (blocking).
 * On any setup failure all acquired resources (fence, wakeref, DSBs, plane
 * prep) are released before returning the error.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		/* Release the commit-ready fence before tearing down. */
		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Reference held until the commit tail has run. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking commits must not overtake queued modesets. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite): unregisters the DRM plane and frees the containing intel_plane.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
/*
 * Populate each plane's possible_crtcs mask: every plane is tied to exactly
 * one pipe, so the mask contains only that pipe's CRTC.
 */
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
							      plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}
/*
 * DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID: translate a CRTC object id into the
 * hardware pipe it drives.  Returns -ENOENT for an unknown CRTC id.
 */
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
8974 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8976 struct drm_device *dev = encoder->base.dev;
8977 struct intel_encoder *source_encoder;
8978 u32 possible_clones = 0;
8980 for_each_intel_encoder(dev, source_encoder) {
8981 if (encoders_cloneable(encoder, source_encoder))
8982 possible_clones |= drm_encoder_mask(&source_encoder->base);
8985 return possible_clones;
8988 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
8990 struct drm_device *dev = encoder->base.dev;
8991 struct intel_crtc *crtc;
8992 u32 possible_crtcs = 0;
8994 for_each_intel_crtc(dev, crtc) {
8995 if (encoder->pipe_mask & BIT(crtc->pipe))
8996 possible_crtcs |= drm_crtc_mask(&crtc->base);
8999 return possible_crtcs;
9002 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
9004 if (!IS_MOBILE(dev_priv))
9007 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
9010 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
9016 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
9018 if (DISPLAY_VER(dev_priv) >= 9)
9021 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
9024 if (HAS_PCH_LPT_H(dev_priv) &&
9025 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
9028 /* DDI E can't be used if DDI A requires 4 lanes */
9029 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
9032 if (!dev_priv->vbt.int_crt_support)
/*
 * Probe and register every display output (encoder/connector) for this
 * platform.  The per-platform branches are ordered newest-first; each one
 * initializes the DDI/DP/HDMI/LVDS/CRT/DSI/TV outputs the hardware supports,
 * consulting detection straps and the VBT where the hardware requires it.
 * Afterwards the possible_crtcs/possible_clones masks are filled in for all
 * encoders and the PCH reference clock is configured.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_DG2(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D_XELPD);
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		icl_dsi_init(dev_priv);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		icl_dsi_init(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		vlv_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
	} else if (HAS_DDI(dev_priv)) {
		u32 found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/* Haswell uses DDI functions to detect digital outputs. */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		if (found)
			intel_ddi_init(dev_priv, PORT_A);

		/* Remaining DDIs are reported via the SFUSE strap register. */
		found = intel_de_read(dev_priv, SFUSE_STRAP);
		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* Derive the CRTC/clone masks now that all encoders exist. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
/*
 * Global mode validation hook (drm_mode_config_funcs.mode_valid): reject
 * display modes the hardware can never drive, based on mode flags and the
 * per-generation transcoder timing limits.  Per-connector limits are
 * enforced separately in connector->mode_valid().
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size / blanking requirements differ by gen. */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
9377 enum drm_mode_status
9378 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9379 const struct drm_display_mode *mode,
9382 int plane_width_max, plane_height_max;
9385 * intel_mode_valid() should be
9386 * sufficient on older platforms.
9388 if (DISPLAY_VER(dev_priv) < 9)
9392 * Most people will probably want a fullscreen
9393 * plane so let's not advertize modes that are
9396 if (DISPLAY_VER(dev_priv) >= 11) {
9397 plane_width_max = 5120 << bigjoiner;
9398 plane_height_max = 4320;
9400 plane_width_max = 5120;
9401 plane_height_max = 4096;
9404 if (mode->hdisplay > plane_width_max)
9405 return MODE_H_ILLEGAL;
9407 if (mode->vdisplay > plane_height_max)
9408 return MODE_V_ILLEGAL;
/* drm_mode_config_funcs vtable wiring the i915 atomic implementation. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
/* Display vfuncs for SKL+ (gen9+) platforms. */
static const struct drm_i915_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};

/* Display vfuncs for pre-gen9 DDI platforms (HSW/BDW). */
static const struct drm_i915_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* Display vfuncs for PCH-split platforms (ILK/SNB/IVB). */
static const struct drm_i915_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* Display vfuncs for VLV/CHV. */
static const struct drm_i915_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* Display vfuncs for the remaining legacy (gen2-4) platforms. */
static const struct drm_i915_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform display vfunc table (newest-first) and installs
 * the cdclk, audio, DPLL clock, and FDI hooks.  No-op without display.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_init_cdclk_hooks(dev_priv);
	intel_audio_hooks_init(dev_priv);

	intel_dpll_init_clock_hook(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display = &vlv_display_funcs;
	} else {
		dev_priv->display = &i9xx_display_funcs;
	}

	intel_fdi_init_hook(dev_priv);
}
/*
 * Read back the current cdclk configuration from hardware and seed the
 * software cdclk state (logical == actual == hw) with it.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(i915))
		return;

	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}
/*
 * Pull every CRTC and plane into @state so the subsequent atomic check
 * recomputes watermarks for the whole device.  Active CRTCs keep their
 * "inherited" flag so the check doesn't escalate to a full modeset.
 * Returns 0 or a PTR_ERR from state acquisition (e.g. -EDEADLK).
 */
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->wm_disp->optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		intel_optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Deadlock from lock acquisition: back off and retry the whole pass. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements. This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform. Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
/*
 * Perform the first atomic commit after hardware state readout.  Active
 * CRTCs keep their "inherited" flag (sink capabilities aren't detected yet,
 * so a full recomputation is deferred to the first real userspace commit);
 * planes are pulled in to recompute plane state, and encoders that fail
 * their initial_fastset_check get their connectors added so the commit can
 * fix them up.  Uses the standard -EDEADLK backoff/retry protocol.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
/*
 * Initialize the drm_mode_config for this device: register the atomic
 * funcs table and set the per-generation framebuffer and cursor size limits.
 */
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->global_obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(i915) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}
/*
 * Tear down mode config: release the global atomic objects first, then the
 * core drm_mode_config.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	/* Serialized modeset work vs. high-priority unbound flip work. */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 DVO timing set.
 * Used by the i830 "both pipes always on" quirk; programs the DPLL, pipe
 * timings and PIPECONF directly via MMIO.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Fixed 640x480 timings (values are reg-format: (active-1)|((total-1)<<16)). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
/*
 * Counterpart to i830_enable_pipe(): shut down the force-quirk pipe and
 * park its DPLL. All planes and cursors are expected to already be off.
 */
10014 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
10016 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
10018 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
/* Planes and cursors must be disabled before the pipe is turned off */
10021 drm_WARN_ON(&dev_priv->drm,
10022 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
10023 drm_WARN_ON(&dev_priv->drm,
10024 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
10025 drm_WARN_ON(&dev_priv->drm,
10026 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
10027 drm_WARN_ON(&dev_priv->drm,
10028 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
10029 drm_WARN_ON(&dev_priv->drm,
10030 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
10032 intel_de_write(dev_priv, PIPECONF(pipe), 0);
10033 intel_de_posting_read(dev_priv, PIPECONF(pipe));
/* Wait until the scanline generator has actually stopped */
10035 intel_wait_for_pipe_scanline_stopped(crtc);
/* Leave the DPLL in its quiescent state (VGA mode disabled) */
10037 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
10038 intel_de_posting_read(dev_priv, DPLL(pipe));
/*
 * Pre-gen4 hardware lets the BIOS attach a primary plane to the "wrong"
 * pipe. Read the plane hw state and disable any plane found scanning out
 * on a pipe other than its crtc's. No-op on DISPLAY_VER >= 4.
 */
10042 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
10044 struct intel_crtc *crtc;
10046 if (DISPLAY_VER(dev_priv) >= 4)
10049 for_each_intel_crtc(&dev_priv->drm, crtc) {
10050 struct intel_plane *plane =
10051 to_intel_plane(crtc->base.primary);
10052 struct intel_crtc *plane_crtc;
/* Skip planes that are not enabled in hardware */
10055 if (!plane->get_hw_state(plane, &pipe))
/* Plane is attached to the expected pipe - nothing to do */
10058 if (pipe == crtc->pipe)
10061 drm_dbg_kms(&dev_priv->drm,
10062 "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
10063 plane->base.base.id, plane->base.name);
/* Disable the plane on the pipe it is actually attached to */
10065 plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
10066 intel_plane_disable_noatomic(plane_crtc, plane);
/* Return true if any encoder is currently attached to this crtc. */
10070 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
10072 struct drm_device *dev = crtc->base.dev;
10073 struct intel_encoder *encoder;
10075 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
/* Find a connector currently using this encoder, or NULL if none. */
10081 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
10083 struct drm_device *dev = encoder->base.dev;
10084 struct intel_connector *connector;
10086 for_each_connector_on_encoder(dev, &encoder->base, connector)
/*
 * Does a PCH transcoder exist for the given pipe? IBX/CPT have one per
 * pipe, while LPT-H only has PCH transcoder A.
 */
10092 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
10093 enum pipe pch_transcoder)
10095 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
10096 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
/*
 * Reset the frame start delay (possibly left non-zero by the BIOS for
 * debugging) to the driver's configured value, on the CPU transcoder and,
 * when the pipe drives one, on the PCH transcoder as well.
 */
10099 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
10101 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10102 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10103 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* HSW/BDW/gen9+ program CHICKEN_TRANS; older platforms use PIPECONF */
10105 if (DISPLAY_VER(dev_priv) >= 9 ||
10106 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
10107 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
/* DSI transcoders are skipped here */
10110 if (transcoder_is_dsi(cpu_transcoder))
10113 val = intel_de_read(dev_priv, reg);
10114 val &= ~HSW_FRAME_START_DELAY_MASK;
10115 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10116 intel_de_write(dev_priv, reg, val);
10118 i915_reg_t reg = PIPECONF(cpu_transcoder);
10121 val = intel_de_read(dev_priv, reg);
10122 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
10123 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10124 intel_de_write(dev_priv, reg, val);
/* Done unless there is also a PCH encoder/transcoder on this pipe */
10127 if (!crtc_state->has_pch_encoder)
/* IBX programs PCH_TRANSCONF; later PCHes use TRANS_CHICKEN2 */
10130 if (HAS_PCH_IBX(dev_priv)) {
10131 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
10134 val = intel_de_read(dev_priv, reg);
10135 val &= ~TRANS_FRAME_START_DELAY_MASK;
10136 val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10137 intel_de_write(dev_priv, reg, val);
10139 enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
10140 i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
10143 val = intel_de_read(dev_priv, reg);
10144 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
10145 val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
10146 intel_de_write(dev_priv, reg, val);
/*
 * Bring a crtc's BIOS-inherited hardware state in line with what the
 * driver expects: clear debug frame start delays, disable all non-primary
 * planes, shut the pipe down if no encoder is using it, and initialize
 * the fifo underrun bookkeeping.
 */
10150 static void intel_sanitize_crtc(struct intel_crtc *crtc,
10151 struct drm_modeset_acquire_ctx *ctx)
10153 struct drm_device *dev = crtc->base.dev;
10154 struct drm_i915_private *dev_priv = to_i915(dev);
10155 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
10157 if (crtc_state->hw.active) {
10158 struct intel_plane *plane;
10160 /* Clear any frame start delays used for debugging left by the BIOS */
10161 intel_sanitize_frame_start_delay(crtc_state);
10163 /* Disable everything but the primary plane */
10164 for_each_intel_plane_on_crtc(dev, crtc, plane) {
10165 const struct intel_plane_state *plane_state =
10166 to_intel_plane_state(plane->base.state);
10168 if (plane_state->uapi.visible &&
10169 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
10170 intel_plane_disable_noatomic(crtc, plane);
10173 /* Disable any background color/etc. set by the BIOS */
10174 intel_color_commit(crtc_state);
10177 /* Adjust the state of the output pipe according to whether we
10178 * have active connectors/encoders. */
10179 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
10180 !crtc_state->bigjoiner_slave)
10181 intel_crtc_disable_noatomic(crtc, ctx);
10183 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
10185 * We start out with underrun reporting disabled to avoid races.
10186 * For correct bookkeeping mark this on active crtcs.
10188 * Also on gmch platforms we don't have any hardware bits to
10189 * disable the underrun reporting. Which means we need to start
10190 * out with underrun reporting disabled also on inactive pipes,
10191 * since otherwise we'll complain about the garbage we read when
10192 * e.g. coming up after runtime pm.
10194 * No protection against concurrent access is required - at
10195 * worst a fifo underrun happens which also sets this to false.
10197 crtc->cpu_fifo_underrun_disabled = true;
10199 * We track the PCH transcoder underrun reporting state
10200 * within the crtc. With crtc for pipe A housing the underrun
10201 * reporting state for PCH transcoder A, crtc for pipe B housing
10202 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
10203 * and marking underrun reporting as disabled for the non-existing
10204 * PCH transcoders B and C would prevent enabling the south
10205 * error interrupt (see cpt_can_enable_serr_int()).
10207 if (has_pch_trancoder(dev_priv, crtc->pipe))
10208 crtc->pch_fifo_underrun_disabled = true;
/*
 * Detect the SNB BIOS misprogramming signature: an active pipe with a
 * shared DPLL but a zero port clock. Callers use this to force the pipe
 * off rather than trusting the bogus state.
 */
10212 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
10214 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10217 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
10218 * the hardware when a high res displays plugged in. DPLL P
10219 * divider is zero, and the pipe timings are bonkers. We'll
10220 * try to disable everything in that case.
10222 * FIXME would be nice to be able to sanitize this state
10223 * without several WARNs, but for now let's take the easy
10226 return IS_SANDYBRIDGE(dev_priv) &&
10227 crtc_state->hw.active &&
10228 crtc_state->shared_dpll &&
10229 crtc_state->port_clock == 0;
/*
 * Fix up an encoder whose BIOS-inherited state is inconsistent: an
 * encoder with active connectors but no active pipe is manually disabled,
 * and any dangling crtc/encoder links are cleared. Finally opregion (and
 * on DDI platforms the PLL mapping) is notified of the sanitized state.
 */
10232 static void intel_sanitize_encoder(struct intel_encoder *encoder)
10234 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10235 struct intel_connector *connector;
10236 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
10237 struct intel_crtc_state *crtc_state = crtc ?
10238 to_intel_crtc_state(crtc->base.state) : NULL;
10240 /* We need to check both for a crtc link (meaning that the
10241 * encoder is active and trying to read from a pipe) and the
10242 * pipe itself being active. */
10243 bool has_active_crtc = crtc_state &&
10244 crtc_state->hw.active;
/* Treat a BIOS-misprogrammed pipe (see has_bogus_dpll_config()) as inactive */
10246 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
10247 drm_dbg_kms(&dev_priv->drm,
10248 "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
10249 pipe_name(crtc->pipe));
10250 has_active_crtc = false;
10253 connector = intel_encoder_find_connector(encoder);
10254 if (connector && !has_active_crtc) {
10255 drm_dbg_kms(&dev_priv->drm,
10256 "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
10257 encoder->base.base.id,
10258 encoder->base.name);
10260 /* Connector is active, but has no active pipe. This is
10261 * fallout from our resume register restoring. Disable
10262 * the encoder manually again. */
10264 struct drm_encoder *best_encoder;
10266 drm_dbg_kms(&dev_priv->drm,
10267 "[ENCODER:%d:%s] manually disabled\n",
10268 encoder->base.base.id,
10269 encoder->base.name);
10271 /* avoid oopsing in case the hooks consult best_encoder */
10272 best_encoder = connector->base.state->best_encoder;
10273 connector->base.state->best_encoder = &encoder->base;
10275 /* FIXME NULL atomic state passed! */
10276 if (encoder->disable)
10277 encoder->disable(NULL, encoder, crtc_state,
10278 connector->base.state);
10279 if (encoder->post_disable)
10280 encoder->post_disable(NULL, encoder, crtc_state,
10281 connector->base.state);
/* restore the original best_encoder */
10283 connector->base.state->best_encoder = best_encoder;
10285 encoder->base.crtc = NULL;
10287 /* Inconsistent output/port/pipe state happens presumably due to
10288 * a bug in one of the get_hw_state functions. Or someplace else
10289 * in our code, like the register restore mess on resume. Clamp
10290 * things to off as a safer default. */
10292 connector->base.dpms = DRM_MODE_DPMS_OFF;
10293 connector->base.encoder = NULL;
10296 /* notify opregion of the sanitized encoder state */
10297 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
10299 if (HAS_DDI(dev_priv))
10300 intel_ddi_sanitize_encoder_pll_mapping(encoder);
10303 /* FIXME read out full plane state for all planes */
/*
 * Read back each plane's hw enable state and pipe assignment, record the
 * visibility in the owning crtc's state, then rebuild the per-crtc plane
 * bitmasks from the result.
 */
10304 static void readout_plane_state(struct drm_i915_private *dev_priv)
10306 struct intel_plane *plane;
10307 struct intel_crtc *crtc;
10309 for_each_intel_plane(&dev_priv->drm, plane) {
10310 struct intel_plane_state *plane_state =
10311 to_intel_plane_state(plane->base.state);
10312 struct intel_crtc_state *crtc_state;
10313 enum pipe pipe = PIPE_A;
10316 visible = plane->get_hw_state(plane, &pipe);
/* get_hw_state() reports which pipe the plane is actually on */
10318 crtc = intel_crtc_for_pipe(dev_priv, pipe);
10319 crtc_state = to_intel_crtc_state(crtc->base.state);
10321 intel_set_plane_visible(crtc_state, plane_state, visible);
10323 drm_dbg_kms(&dev_priv->drm,
10324 "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
10325 plane->base.base.id, plane->base.name,
10326 enableddisabled(visible), pipe_name(pipe));
/* Recompute plane bitmasks now that all visibilities are known */
10329 for_each_intel_crtc(&dev_priv->drm, crtc) {
10330 struct intel_crtc_state *crtc_state =
10331 to_intel_crtc_state(crtc->base.state);
10333 fixup_plane_bitmasks(crtc_state);
/*
 * Read the current hardware modeset state into the software state
 * objects: crtcs, planes, encoders, DPLLs and connectors, plus derived
 * state (active pipe masks, cdclk/voltage minimums, bandwidth). All
 * read-out crtc states are flagged ->inherited so the next commit does a
 * full recomputation.
 */
10337 static void intel_modeset_readout_hw_state(struct drm_device *dev)
10339 struct drm_i915_private *dev_priv = to_i915(dev);
10340 struct intel_cdclk_state *cdclk_state =
10341 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
10342 struct intel_dbuf_state *dbuf_state =
10343 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
10345 struct intel_crtc *crtc;
10346 struct intel_encoder *encoder;
10347 struct intel_connector *connector;
10348 struct drm_connector_list_iter conn_iter;
10349 u8 active_pipes = 0;
/* Reset each crtc state and read the pipe config back from hardware */
10351 for_each_intel_crtc(dev, crtc) {
10352 struct intel_crtc_state *crtc_state =
10353 to_intel_crtc_state(crtc->base.state);
10355 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
10356 intel_crtc_free_hw_state(crtc_state);
10357 intel_crtc_state_reset(crtc_state, crtc);
10359 intel_crtc_get_pipe_config(crtc_state);
10361 crtc_state->hw.enable = crtc_state->hw.active;
10363 crtc->base.enabled = crtc_state->hw.enable;
10364 crtc->active = crtc_state->hw.active;
10366 if (crtc_state->hw.active)
10367 active_pipes |= BIT(crtc->pipe);
10369 drm_dbg_kms(&dev_priv->drm,
10370 "[CRTC:%d:%s] hw state readout: %s\n",
10371 crtc->base.base.id, crtc->base.name,
10372 enableddisabled(crtc_state->hw.active));
10375 cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
10377 readout_plane_state(dev_priv);
/* Read back each encoder's state and link it to its crtc */
10379 for_each_intel_encoder(dev, encoder) {
10380 struct intel_crtc_state *crtc_state = NULL;
10384 if (encoder->get_hw_state(encoder, &pipe)) {
10385 crtc = intel_crtc_for_pipe(dev_priv, pipe);
10386 crtc_state = to_intel_crtc_state(crtc->base.state);
10388 encoder->base.crtc = &crtc->base;
10389 intel_encoder_get_config(encoder, crtc_state);
10391 /* read out to slave crtc as well for bigjoiner */
10392 if (crtc_state->bigjoiner) {
10393 /* encoder should be linked to the bigjoiner master crtc */
10394 WARN_ON(crtc_state->bigjoiner_slave);
10396 crtc = crtc_state->bigjoiner_linked_crtc;
10397 crtc_state = to_intel_crtc_state(crtc->base.state);
10398 intel_encoder_get_config(encoder, crtc_state);
10401 encoder->base.crtc = NULL;
10404 if (encoder->sync_state)
10405 encoder->sync_state(encoder, crtc_state);
10407 drm_dbg_kms(&dev_priv->drm,
10408 "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10409 encoder->base.base.id, encoder->base.name,
10410 enableddisabled(encoder->base.crtc),
10414 intel_dpll_readout_hw_state(dev_priv);
/* Read back connector state and accumulate connector/encoder masks */
10416 drm_connector_list_iter_begin(dev, &conn_iter);
10417 for_each_intel_connector_iter(connector, &conn_iter) {
10418 if (connector->get_hw_state(connector)) {
10419 struct intel_crtc_state *crtc_state;
10420 struct intel_crtc *crtc;
10422 connector->base.dpms = DRM_MODE_DPMS_ON;
10424 encoder = intel_attached_encoder(connector);
10425 connector->base.encoder = &encoder->base;
10427 crtc = to_intel_crtc(encoder->base.crtc);
10428 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
10430 if (crtc_state && crtc_state->hw.active) {
10432 * This has to be done during hardware readout
10433 * because anything calling .crtc_disable may
10434 * rely on the connector_mask being accurate.
10436 crtc_state->uapi.connector_mask |=
10437 drm_connector_mask(&connector->base);
10438 crtc_state->uapi.encoder_mask |=
10439 drm_encoder_mask(&encoder->base);
10442 connector->base.dpms = DRM_MODE_DPMS_OFF;
10443 connector->base.encoder = NULL;
10445 drm_dbg_kms(&dev_priv->drm,
10446 "[CONNECTOR:%d:%s] hw state readout: %s\n",
10447 connector->base.base.id, connector->base.name,
10448 enableddisabled(connector->base.encoder));
10450 drm_connector_list_iter_end(&conn_iter);
/* Derive per-crtc timings, data rates, min cdclk/voltage and bandwidth */
10452 for_each_intel_crtc(dev, crtc) {
10453 struct intel_bw_state *bw_state =
10454 to_intel_bw_state(dev_priv->bw_obj.state);
10455 struct intel_crtc_state *crtc_state =
10456 to_intel_crtc_state(crtc->base.state);
10457 struct intel_plane *plane;
10460 if (crtc_state->hw.active) {
10462 * The initial mode needs to be set in order to keep
10463 * the atomic core happy. It wants a valid mode if the
10464 * crtc's enabled, so we do the above call.
10466 * But we don't set all the derived state fully, hence
10467 * set a flag to indicate that a full recalculation is
10468 * needed on the next commit.
10470 crtc_state->inherited = true;
10472 intel_crtc_update_active_timings(crtc_state);
10474 intel_crtc_copy_hw_to_uapi_state(crtc_state);
10477 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
10478 const struct intel_plane_state *plane_state =
10479 to_intel_plane_state(plane->base.state);
10482 * FIXME don't have the fb yet, so can't
10483 * use intel_plane_data_rate() :(
10485 if (plane_state->uapi.visible)
10486 crtc_state->data_rate[plane->id] =
10487 4 * crtc_state->pixel_rate;
10489 * FIXME don't have the fb yet, so can't
10490 * use plane->min_cdclk() :(
10492 if (plane_state->uapi.visible && plane->min_cdclk) {
10493 if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
10494 crtc_state->min_cdclk[plane->id] =
10495 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
10497 crtc_state->min_cdclk[plane->id] =
10498 crtc_state->pixel_rate;
10500 drm_dbg_kms(&dev_priv->drm,
10501 "[PLANE:%d:%s] min_cdclk %d kHz\n",
10502 plane->base.base.id, plane->base.name,
10503 crtc_state->min_cdclk[plane->id]);
10506 if (crtc_state->hw.active) {
10507 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
10508 if (drm_WARN_ON(dev, min_cdclk < 0))
10512 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
10513 cdclk_state->min_voltage_level[crtc->pipe] =
10514 crtc_state->min_voltage_level;
10516 intel_bw_crtc_update(bw_state, crtc_state);
10518 intel_pipe_config_sanity_check(dev_priv, crtc_state);
/*
 * Take the power domain references required by each active encoder via
 * its optional ->get_power_domains() hook.
 */
10523 get_encoder_power_domains(struct drm_i915_private *dev_priv)
10525 struct intel_encoder *encoder;
10527 for_each_intel_encoder(&dev_priv->drm, encoder) {
10528 struct intel_crtc_state *crtc_state;
10530 if (!encoder->get_power_domains)
10534 * MST-primary and inactive encoders don't have a crtc state
10535 * and neither of these require any power domain references.
10537 if (!encoder->base.crtc)
10540 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10541 encoder->get_power_domains(encoder, crtc_state);
/*
 * Apply display workarounds (WAs) that must be in place before any other
 * display hardware is touched during state takeover.
 */
10545 static void intel_early_display_was(struct drm_i915_private *dev_priv)
10548 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
10549 * Also known as Wa_14010480278.
10551 if (IS_DISPLAY_VER(dev_priv, 10, 12))
10552 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
10553 intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
10555 if (IS_HASWELL(dev_priv)) {
10557 * WaRsPkgCStateDisplayPMReq:hsw
10558 * System hang if this isn't done before disabling all planes!
10560 intel_de_write(dev_priv, CHICKEN_PAR1_1,
10561 intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
10564 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
10565 /* Display WA #1142:kbl,cfl,cml */
10566 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
10567 KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
10568 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
10569 KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
10570 KBL_ARB_FILL_SPARE_14);
/*
 * Force an (assumed disabled) PCH HDMI port's transcoder select back to
 * pipe A if the BIOS left it pointing elsewhere. See
 * ibx_sanitize_pch_ports() for the rationale.
 */
10574 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
10575 enum port port, i915_reg_t hdmi_reg)
10577 u32 val = intel_de_read(dev_priv, hdmi_reg);
/* Nothing to do if the port is enabled or already selects pipe A */
10579 if (val & SDVO_ENABLE ||
10580 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
10583 drm_dbg_kms(&dev_priv->drm,
10584 "Sanitizing transcoder select for HDMI %c\n",
10587 val &= ~SDVO_PIPE_SEL_MASK;
10588 val |= SDVO_PIPE_SEL(PIPE_A);
10590 intel_de_write(dev_priv, hdmi_reg, val);
/*
 * Force an (assumed disabled) PCH DP port's transcoder select back to
 * pipe A if the BIOS left it pointing elsewhere. See
 * ibx_sanitize_pch_ports() for the rationale.
 */
10593 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
10594 enum port port, i915_reg_t dp_reg)
10596 u32 val = intel_de_read(dev_priv, dp_reg);
/* Nothing to do if the port is enabled or already selects pipe A */
10598 if (val & DP_PORT_EN ||
10599 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
10602 drm_dbg_kms(&dev_priv->drm,
10603 "Sanitizing transcoder select for DP %c\n",
10606 val &= ~DP_PIPE_SEL_MASK;
10607 val |= DP_PIPE_SEL(PIPE_A);
10609 intel_de_write(dev_priv, dp_reg, val);
/* Sanitize the transcoder select bits on all IBX PCH DP and HDMI ports. */
10612 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
10615 * The BIOS may select transcoder B on some of the PCH
10616 * ports even it doesn't enable the port. This would trip
10617 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
10618 * Sanitize the transcoder select bits to prevent that. We
10619 * assume that the BIOS never actually enabled the port,
10620 * because if it did we'd actually have to toggle the port
10621 * on and back off to make the transcoder A select stick
10622 * (see. intel_dp_link_down(), intel_disable_hdmi(),
10623 * intel_disable_sdvo()).
10625 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
10626 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
10627 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
10629 /* PCH SDVOB multiplex with HDMIB */
10630 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
10631 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
10632 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
10635 /* Scan out the current hw modeset state,
10636 * and sanitizes it to the current state
/*
 * Entry point for hardware state takeover: read out the full modeset
 * state, then sanitize encoders, crtcs, plane mappings, PCH ports,
 * DPLLs and watermarks so the driver's software state is consistent.
 * Runs with a POWER_DOMAIN_INIT wakeref held for the duration.
 */
10639 intel_modeset_setup_hw_state(struct drm_device *dev,
10640 struct drm_modeset_acquire_ctx *ctx)
10642 struct drm_i915_private *dev_priv = to_i915(dev);
10643 struct intel_encoder *encoder;
10644 struct intel_crtc *crtc;
10645 intel_wakeref_t wakeref;
10647 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
10649 intel_early_display_was(dev_priv);
10650 intel_modeset_readout_hw_state(dev);
10652 /* HW state is read out, now we need to sanitize this mess. */
10653 get_encoder_power_domains(dev_priv);
10655 if (HAS_PCH_IBX(dev_priv))
10656 ibx_sanitize_pch_ports(dev_priv);
10659 * intel_sanitize_plane_mapping() may need to do vblank
10660 * waits, so we need vblank interrupts restored beforehand.
10662 for_each_intel_crtc(&dev_priv->drm, crtc) {
10663 struct intel_crtc_state *crtc_state =
10664 to_intel_crtc_state(crtc->base.state);
10666 drm_crtc_vblank_reset(&crtc->base);
10668 if (crtc_state->hw.active)
10669 intel_crtc_vblank_on(crtc_state);
10672 intel_sanitize_plane_mapping(dev_priv);
10674 for_each_intel_encoder(dev, encoder)
10675 intel_sanitize_encoder(encoder);
10677 for_each_intel_crtc(&dev_priv->drm, crtc) {
10678 struct intel_crtc_state *crtc_state =
10679 to_intel_crtc_state(crtc->base.state);
10681 intel_sanitize_crtc(crtc, ctx);
10682 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
10685 intel_modeset_update_connector_atomic_state(dev);
10687 intel_dpll_sanitize_state(dev_priv);
/* Read out (and on g4x/vlv/chv sanitize) the watermark state */
10689 if (IS_G4X(dev_priv)) {
10690 g4x_wm_get_hw_state(dev_priv);
10691 g4x_wm_sanitize(dev_priv);
10692 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
10693 vlv_wm_get_hw_state(dev_priv);
10694 vlv_wm_sanitize(dev_priv);
10695 } else if (DISPLAY_VER(dev_priv) >= 9) {
10696 skl_wm_get_hw_state(dev_priv);
10697 } else if (HAS_PCH_SPLIT(dev_priv)) {
10698 ilk_wm_get_hw_state(dev_priv);
/* Drop any power domain references the readout left behind */
10701 for_each_intel_crtc(dev, crtc) {
10702 struct intel_crtc_state *crtc_state =
10703 to_intel_crtc_state(crtc->base.state);
10706 put_domains = modeset_get_crtc_power_domains(crtc_state);
10707 if (drm_WARN_ON(dev, put_domains))
10708 modeset_put_crtc_power_domains(crtc, put_domains);
10711 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
/*
 * Restore the atomic state saved at suspend time (modeset_restore_state),
 * retrying the modeset locks on deadlock. The saved state reference is
 * consumed regardless of success.
 */
10714 void intel_display_resume(struct drm_device *dev)
10716 struct drm_i915_private *dev_priv = to_i915(dev);
10717 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
10718 struct drm_modeset_acquire_ctx ctx;
10721 if (!HAS_DISPLAY(dev_priv))
10724 dev_priv->modeset_restore_state = NULL;
10726 state->acquire_ctx = &ctx;
10728 drm_modeset_acquire_init(&ctx, 0);
/* Standard drm_modeset deadlock/backoff retry loop */
10731 ret = drm_modeset_lock_all_ctx(dev, &ctx);
10732 if (ret != -EDEADLK)
10735 drm_modeset_backoff(&ctx);
10739 ret = __intel_display_resume(dev, state, &ctx);
10741 intel_enable_ipc(dev_priv);
10742 drm_modeset_drop_locks(&ctx);
10743 drm_modeset_acquire_fini(&ctx);
10746 drm_err(&dev_priv->drm,
10747 "Restoring old state failed with %i\n", ret);
10749 drm_atomic_state_put(state);
/*
 * Cancel all connector work items (modeset retry, HDCP check/prop work)
 * that may have been queued by hotplug handling, prior to teardown.
 */
10752 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
10754 struct intel_connector *connector;
10755 struct drm_connector_list_iter conn_iter;
10757 /* Kill all the work that may have been queued by hpd. */
10758 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
10759 for_each_intel_connector_iter(connector, &conn_iter) {
10760 if (connector->modeset_retry_work.func)
10761 cancel_work_sync(&connector->modeset_retry_work);
10762 if (connector->hdcp.shim) {
10763 cancel_delayed_work_sync(&connector->hdcp.check_work);
10764 cancel_work_sync(&connector->hdcp.prop_work);
10767 drm_connector_list_iter_end(&conn_iter);
10770 /* part #1: call before irq uninstall */
/*
 * Flush all pending display work (flips, modesets, freed-state cleanup)
 * while interrupts are still installed.
 */
10771 void intel_modeset_driver_remove(struct drm_i915_private *i915)
10773 if (!HAS_DISPLAY(i915))
10776 flush_workqueue(i915->flip_wq);
10777 flush_workqueue(i915->modeset_wq);
/* All freed atomic state must have been processed by now */
10779 flush_work(&i915->atomic_helper.free_work);
10780 drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
10783 /* part #2: call after irq uninstall */
/*
 * Tear down the bulk of the display stack once interrupts are gone:
 * hpd/poll work, MST, fbdev, FBC, HDCP, mode config, overlay, gmbus and
 * the display workqueues. Ordering constraints are noted inline.
 */
10784 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
10786 if (!HAS_DISPLAY(i915))
10790 * Due to the hpd irq storm handling the hotplug work can re-arm the
10791 * poll handlers. Hence disable polling after hpd handling is shut down.
10793 intel_hpd_poll_fini(i915);
10796 * MST topology needs to be suspended so we don't have any calls to
10797 * fbdev after it's finalized. MST will be destroyed later as part of
10798 * drm_mode_config_cleanup()
10800 intel_dp_mst_suspend(i915);
10802 /* poll work can call into fbdev, hence clean that up afterwards */
10803 intel_fbdev_fini(i915);
10805 intel_unregister_dsm_handler();
10807 intel_fbc_global_disable(i915);
10809 /* flush any delayed tasks or pending work */
10810 flush_scheduled_work();
10812 intel_hdcp_component_fini(i915);
10814 intel_mode_config_cleanup(i915);
10816 intel_overlay_cleanup(i915);
10818 intel_gmbus_teardown(i915);
10820 destroy_workqueue(i915->flip_wq);
10821 destroy_workqueue(i915->modeset_wq);
10823 intel_fbc_cleanup(i915);
10826 /* part #3: call after gem init */
/* Final display teardown: DMC firmware, power domains, VGA and VBT. */
10827 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
10829 intel_dmc_ucode_fini(i915);
10831 intel_power_domains_driver_remove(i915);
10833 intel_vga_unregister(i915);
10835 intel_bios_driver_remove(i915);
/*
 * Decide whether driver probe must be deferred: either vga_switcheroo
 * (apple-gmux) is not ready yet, or the panel's privacy-screen provider
 * has not probed. Returns true to defer.
 */
10838 bool intel_modeset_probe_defer(struct pci_dev *pdev)
10840 struct drm_privacy_screen *privacy_screen;
10843 * apple-gmux is needed on dual GPU MacBook Pro
10844 * to probe the panel if we're the inactive GPU.
10846 if (vga_switcheroo_client_probe_defer(pdev))
10849 /* If the LCD panel has a privacy-screen, wait for it */
10850 privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
10851 if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
/* We only needed the reference to check for -EPROBE_DEFER */
10854 drm_privacy_screen_put(privacy_screen);
/*
 * Register the userspace-visible parts of the display driver: debugfs,
 * opregion/ACPI video, audio, the async fbdev initial config and output
 * polling. Ordering constraints are documented inline.
 */
10859 void intel_display_driver_register(struct drm_i915_private *i915)
10861 if (!HAS_DISPLAY(i915))
10864 intel_display_debugfs_register(i915);
10866 /* Must be done after probing outputs */
10867 intel_opregion_register(i915);
10868 acpi_video_register();
10870 intel_audio_init(i915);
10873 * Some ports require correctly set-up hpd registers for
10874 * detection to work properly (leading to ghost connected
10875 * connector status), e.g. VGA on gm45. Hence we can only set
10876 * up the initial fbdev config after hpd irqs are fully
10877 * enabled. We do it last so that the async config cannot run
10878 * before the connectors are registered.
10880 intel_fbdev_initial_config_async(&i915->drm);
10883 * We need to coordinate the hotplugs with the asynchronous
10884 * fbdev configuration, for which we use the
10885 * fbdev->async_cookie.
10887 drm_kms_helper_poll_init(&i915->drm);
10890 void intel_display_driver_unregister(struct drm_i915_private *i915)
10892 if (!HAS_DISPLAY(i915))
10895 intel_fbdev_unregister(i915);
10896 intel_audio_deinit(i915);
10899 * After flushing the fbdev (incl. a late async config which
10900 * will have delayed queuing of a hotplug event), then flush
10901 * the hotplug events.
10903 drm_kms_helper_poll_fini(&i915->drm);
10904 drm_atomic_helper_shutdown(&i915->drm);
10906 acpi_video_unregister();
10907 intel_opregion_unregister(i915);