2 * Copyright © 2006-2007 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 #include <linux/vga_switcheroo.h>
37 #include <drm/drm_atomic.h>
38 #include <drm/drm_atomic_helper.h>
39 #include <drm/drm_atomic_uapi.h>
40 #include <drm/drm_damage_helper.h>
41 #include <drm/drm_dp_helper.h>
42 #include <drm/drm_edid.h>
43 #include <drm/drm_fourcc.h>
44 #include <drm/drm_plane_helper.h>
45 #include <drm/drm_privacy_screen_consumer.h>
46 #include <drm/drm_probe_helper.h>
47 #include <drm/drm_rect.h>
49 #include "display/intel_audio.h"
50 #include "display/intel_crt.h"
51 #include "display/intel_ddi.h"
52 #include "display/intel_display_debugfs.h"
53 #include "display/intel_dp.h"
54 #include "display/intel_dp_mst.h"
55 #include "display/intel_dpll.h"
56 #include "display/intel_dpll_mgr.h"
57 #include "display/intel_drrs.h"
58 #include "display/intel_dsi.h"
59 #include "display/intel_dvo.h"
60 #include "display/intel_fb.h"
61 #include "display/intel_gmbus.h"
62 #include "display/intel_hdmi.h"
63 #include "display/intel_lvds.h"
64 #include "display/intel_sdvo.h"
65 #include "display/intel_snps_phy.h"
66 #include "display/intel_tv.h"
67 #include "display/intel_vdsc.h"
68 #include "display/intel_vrr.h"
70 #include "gem/i915_gem_lmem.h"
71 #include "gem/i915_gem_object.h"
73 #include "gt/gen8_ppgtt.h"
79 #include "intel_acpi.h"
80 #include "intel_atomic.h"
81 #include "intel_atomic_plane.h"
83 #include "intel_cdclk.h"
84 #include "intel_color.h"
85 #include "intel_crtc.h"
87 #include "intel_display_types.h"
88 #include "intel_dmc.h"
89 #include "intel_dp_link_training.h"
90 #include "intel_dpt.h"
91 #include "intel_fbc.h"
92 #include "intel_fbdev.h"
93 #include "intel_fdi.h"
94 #include "intel_fifo_underrun.h"
95 #include "intel_frontbuffer.h"
96 #include "intel_hdcp.h"
97 #include "intel_hotplug.h"
98 #include "intel_overlay.h"
99 #include "intel_panel.h"
100 #include "intel_pch_display.h"
101 #include "intel_pch_refclk.h"
102 #include "intel_pcode.h"
103 #include "intel_pipe_crc.h"
104 #include "intel_plane_initial.h"
105 #include "intel_pm.h"
106 #include "intel_pps.h"
107 #include "intel_psr.h"
108 #include "intel_quirks.h"
109 #include "intel_sprite.h"
110 #include "intel_tc.h"
111 #include "intel_vga.h"
112 #include "i9xx_plane.h"
113 #include "skl_scaler.h"
114 #include "skl_universal_plane.h"
115 #include "vlv_dsi_pll.h"
116 #include "vlv_sideband.h"
119 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
120 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
121 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
122 const struct intel_link_m_n *m_n);
123 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
124 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
125 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
126 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
127 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
128 static void intel_modeset_setup_hw_state(struct drm_device *dev,
129 struct drm_modeset_acquire_ctx *ctx);
132 * intel_update_watermarks - update FIFO watermark values based on current modes
133 * @dev_priv: i915 device
135 * Calculate watermark values for the various WM regs based on current mode
136 * and plane configuration.
138 * There are several cases to deal with here:
139 * - normal (i.e. non-self-refresh)
140 * - self-refresh (SR) mode
141 * - lines are large relative to FIFO size (buffer can hold up to 2)
142 * - lines are small relative to FIFO size (buffer can hold more than 2
143 * lines), so need to account for TLB latency
145 * The normal calculation is:
146 * watermark = dotclock * bytes per pixel * latency
147 * where latency is platform & configuration dependent (we assume pessimal
150 * The SR calculation is:
151 * watermark = (trunc(latency/line time)+1) * surface width *
154 * line time = htotal / dotclock
155 * surface width = hdisplay for normal plane and 64 for cursor
156 * and latency is assumed to be high, as above.
158 * The final value programmed to the register should always be rounded up,
159 * and include an extra 2 entries to account for clock crossings.
161 * We don't use the sprite, so we can ignore that. And on Crestline we have
162 * to set the non-SR watermarks to 8.
164 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
166 if (dev_priv->wm_disp->update_wm)
167 dev_priv->wm_disp->update_wm(dev_priv);
170 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
171 struct intel_crtc *crtc)
173 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
174 if (dev_priv->wm_disp->compute_pipe_wm)
175 return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
179 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
180 struct intel_crtc *crtc)
182 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
183 if (!dev_priv->wm_disp->compute_intermediate_wm)
185 if (drm_WARN_ON(&dev_priv->drm,
186 !dev_priv->wm_disp->compute_pipe_wm))
188 return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
191 static bool intel_initial_watermarks(struct intel_atomic_state *state,
192 struct intel_crtc *crtc)
194 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
195 if (dev_priv->wm_disp->initial_watermarks) {
196 dev_priv->wm_disp->initial_watermarks(state, crtc);
202 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
203 struct intel_crtc *crtc)
205 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
206 if (dev_priv->wm_disp->atomic_update_watermarks)
207 dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
210 static void intel_optimize_watermarks(struct intel_atomic_state *state,
211 struct intel_crtc *crtc)
213 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
214 if (dev_priv->wm_disp->optimize_watermarks)
215 dev_priv->wm_disp->optimize_watermarks(state, crtc);
218 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
220 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
221 if (dev_priv->wm_disp->compute_global_watermarks)
222 return dev_priv->wm_disp->compute_global_watermarks(state);
226 /* returns HPLL frequency in kHz */
227 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
229 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
231 /* Obtain SKU information */
232 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
233 CCK_FUSE_HPLL_FREQ_MASK;
235 return vco_freq[hpll_freq] * 1000;
238 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
239 const char *name, u32 reg, int ref_freq)
244 val = vlv_cck_read(dev_priv, reg);
245 divider = val & CCK_FREQUENCY_VALUES;
247 drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
248 (divider << CCK_FREQUENCY_STATUS_SHIFT),
249 "%s change in progress\n", name);
251 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
254 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
255 const char *name, u32 reg)
259 vlv_cck_get(dev_priv);
261 if (dev_priv->hpll_freq == 0)
262 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
264 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
266 vlv_cck_put(dev_priv);
271 static void intel_update_czclk(struct drm_i915_private *dev_priv)
273 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
276 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
277 CCK_CZ_CLOCK_CONTROL);
279 drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
280 dev_priv->czclk_freq);
283 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
285 return (crtc_state->active_planes &
286 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
289 /* WA Display #0827: Gen9:all */
291 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
294 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
295 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
297 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
298 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
301 /* Wa_2006604312:icl,ehl */
303 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
307 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
308 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
310 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
311 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
314 /* Wa_1604331009:icl,jsl,ehl */
316 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
319 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
320 enable ? CURSOR_GATING_DIS : 0);
324 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
326 return crtc_state->master_transcoder != INVALID_TRANSCODER;
330 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
332 return crtc_state->sync_mode_slaves_mask != 0;
336 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
338 return is_trans_port_sync_master(crtc_state) ||
339 is_trans_port_sync_slave(crtc_state);
342 static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
344 if (crtc_state->bigjoiner_slave)
345 return crtc_state->bigjoiner_linked_crtc;
347 return to_intel_crtc(crtc_state->uapi.crtc);
350 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
353 i915_reg_t reg = PIPEDSL(pipe);
356 line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
358 line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
360 return line1 != line2;
363 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
365 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
366 enum pipe pipe = crtc->pipe;
368 /* Wait for the display line to settle/start moving */
369 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
370 drm_err(&dev_priv->drm,
371 "pipe %c scanline %s wait timed out\n",
372 pipe_name(pipe), onoff(state));
375 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
377 wait_for_pipe_scanline_moving(crtc, false);
380 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
382 wait_for_pipe_scanline_moving(crtc, true);
386 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
388 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
389 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
391 if (DISPLAY_VER(dev_priv) >= 4) {
392 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
394 /* Wait for the Pipe State to go off */
395 if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
396 PIPECONF_STATE_ENABLE, 100))
397 drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
399 intel_wait_for_pipe_scanline_stopped(crtc);
403 void assert_transcoder(struct drm_i915_private *dev_priv,
404 enum transcoder cpu_transcoder, bool state)
407 enum intel_display_power_domain power_domain;
408 intel_wakeref_t wakeref;
410 /* we keep both pipes enabled on 830 */
411 if (IS_I830(dev_priv))
414 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
415 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
417 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
418 cur_state = !!(val & PIPECONF_ENABLE);
420 intel_display_power_put(dev_priv, power_domain, wakeref);
425 I915_STATE_WARN(cur_state != state,
426 "transcoder %s assertion failure (expected %s, current %s)\n",
427 transcoder_name(cpu_transcoder),
428 onoff(state), onoff(cur_state));
431 static void assert_plane(struct intel_plane *plane, bool state)
436 cur_state = plane->get_hw_state(plane, &pipe);
438 I915_STATE_WARN(cur_state != state,
439 "%s assertion failure (expected %s, current %s)\n",
440 plane->base.name, onoff(state), onoff(cur_state));
443 #define assert_plane_enabled(p) assert_plane(p, true)
444 #define assert_plane_disabled(p) assert_plane(p, false)
446 static void assert_planes_disabled(struct intel_crtc *crtc)
448 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
449 struct intel_plane *plane;
451 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
452 assert_plane_disabled(plane);
455 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
456 struct intel_digital_port *dig_port,
457 unsigned int expected_mask)
462 switch (dig_port->base.port) {
464 port_mask = DPLL_PORTB_READY_MASK;
468 port_mask = DPLL_PORTC_READY_MASK;
473 port_mask = DPLL_PORTD_READY_MASK;
474 dpll_reg = DPIO_PHY_STATUS;
480 if (intel_de_wait_for_register(dev_priv, dpll_reg,
481 port_mask, expected_mask, 1000))
482 drm_WARN(&dev_priv->drm, 1,
483 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
484 dig_port->base.base.base.id, dig_port->base.base.name,
485 intel_de_read(dev_priv, dpll_reg) & port_mask,
489 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
491 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
493 if (HAS_PCH_LPT(dev_priv))
499 void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
501 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
502 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
503 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
504 enum pipe pipe = crtc->pipe;
508 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
510 assert_planes_disabled(crtc);
513 * A pipe without a PLL won't actually be able to drive bits from
514 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
517 if (HAS_GMCH(dev_priv)) {
518 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
519 assert_dsi_pll_enabled(dev_priv);
521 assert_pll_enabled(dev_priv, pipe);
523 if (new_crtc_state->has_pch_encoder) {
524 /* if driving the PCH, we need FDI enabled */
525 assert_fdi_rx_pll_enabled(dev_priv,
526 intel_crtc_pch_transcoder(crtc));
527 assert_fdi_tx_pll_enabled(dev_priv,
528 (enum pipe) cpu_transcoder);
530 /* FIXME: assert CPU port conditions for SNB+ */
533 /* Wa_22012358565:adl-p */
534 if (DISPLAY_VER(dev_priv) == 13)
535 intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
536 0, PIPE_ARB_USE_PROG_SLOTS);
538 reg = PIPECONF(cpu_transcoder);
539 val = intel_de_read(dev_priv, reg);
540 if (val & PIPECONF_ENABLE) {
541 /* we keep both pipes enabled on 830 */
542 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
546 intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
547 intel_de_posting_read(dev_priv, reg);
550 * Until the pipe starts PIPEDSL reads will return a stale value,
551 * which causes an apparent vblank timestamp jump when PIPEDSL
552 * resets to its proper value. That also messes up the frame count
553 * when it's derived from the timestamps. So let's wait for the
554 * pipe to start properly before we call drm_crtc_vblank_on()
556 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
557 intel_wait_for_pipe_scanline_moving(crtc);
560 void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
562 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
563 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
564 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
565 enum pipe pipe = crtc->pipe;
569 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
572 * Make sure planes won't keep trying to pump pixels to us,
573 * or we might hang the display.
575 assert_planes_disabled(crtc);
577 reg = PIPECONF(cpu_transcoder);
578 val = intel_de_read(dev_priv, reg);
579 if ((val & PIPECONF_ENABLE) == 0)
583 * Double wide has implications for planes
584 * so best keep it disabled when not needed.
586 if (old_crtc_state->double_wide)
587 val &= ~PIPECONF_DOUBLE_WIDE;
589 /* Don't disable pipe or pipe PLLs if needed */
590 if (!IS_I830(dev_priv))
591 val &= ~PIPECONF_ENABLE;
593 if (DISPLAY_VER(dev_priv) >= 12)
594 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
595 FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
597 intel_de_write(dev_priv, reg, val);
598 if ((val & PIPECONF_ENABLE) == 0)
599 intel_wait_for_pipe_off(old_crtc_state);
602 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
604 unsigned int size = 0;
607 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
608 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
613 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
615 unsigned int size = 0;
618 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
619 unsigned int plane_size;
621 if (rem_info->plane[i].linear)
622 plane_size = rem_info->plane[i].size;
624 plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
629 if (rem_info->plane_alignment)
630 size = ALIGN(size, rem_info->plane_alignment);
638 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
640 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
641 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
643 return DISPLAY_VER(dev_priv) < 4 ||
645 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
649 * Convert the x/y offsets into a linear offset.
650 * Only valid with 0/180 degree rotation, which is fine since linear
651 * offset is only used with linear buffers on pre-hsw and tiled buffers
652 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
654 u32 intel_fb_xy_to_linear(int x, int y,
655 const struct intel_plane_state *state,
658 const struct drm_framebuffer *fb = state->hw.fb;
659 unsigned int cpp = fb->format->cpp[color_plane];
660 unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
662 return y * pitch + x * cpp;
666 * Add the x/y offsets derived from fb->offsets[] to the user
667 * specified plane src x/y offsets. The resulting x/y offsets
668 * specify the start of scanout from the beginning of the gtt mapping.
670 void intel_add_fb_offsets(int *x, int *y,
671 const struct intel_plane_state *state,
675 *x += state->view.color_plane[color_plane].x;
676 *y += state->view.color_plane[color_plane].y;
679 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
680 u32 pixel_format, u64 modifier)
682 struct intel_crtc *crtc;
683 struct intel_plane *plane;
685 if (!HAS_DISPLAY(dev_priv))
689 * We assume the primary plane for pipe A has
690 * the highest stride limits of them all,
691 * if in case pipe A is disabled, use the first pipe from pipe_mask.
693 crtc = intel_first_crtc(dev_priv);
697 plane = to_intel_plane(crtc->base.primary);
699 return plane->max_stride(plane, pixel_format, modifier,
704 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
705 struct intel_plane_state *plane_state,
708 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
710 plane_state->uapi.visible = visible;
713 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
715 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
718 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
720 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
721 struct drm_plane *plane;
724 * Active_planes aliases if multiple "primary" or cursor planes
725 * have been used on the same (or wrong) pipe. plane_mask uses
726 * unique ids, hence we can use that to reconstruct active_planes.
728 crtc_state->enabled_planes = 0;
729 crtc_state->active_planes = 0;
731 drm_for_each_plane_mask(plane, &dev_priv->drm,
732 crtc_state->uapi.plane_mask) {
733 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
734 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
738 void intel_plane_disable_noatomic(struct intel_crtc *crtc,
739 struct intel_plane *plane)
741 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
742 struct intel_crtc_state *crtc_state =
743 to_intel_crtc_state(crtc->base.state);
744 struct intel_plane_state *plane_state =
745 to_intel_plane_state(plane->base.state);
747 drm_dbg_kms(&dev_priv->drm,
748 "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
749 plane->base.base.id, plane->base.name,
750 crtc->base.base.id, crtc->base.name);
752 intel_set_plane_visible(crtc_state, plane_state, false);
753 fixup_plane_bitmasks(crtc_state);
754 crtc_state->data_rate[plane->id] = 0;
755 crtc_state->min_cdclk[plane->id] = 0;
757 if (plane->id == PLANE_PRIMARY)
758 hsw_disable_ips(crtc_state);
761 * Vblank time updates from the shadow to live plane control register
762 * are blocked if the memory self-refresh mode is active at that
763 * moment. So to make sure the plane gets truly disabled, disable
764 * first the self-refresh mode. The self-refresh enable bit in turn
765 * will be checked/applied by the HW only at the next frame start
766 * event which is after the vblank start event, so we need to have a
767 * wait-for-vblank between disabling the plane and the pipe.
769 if (HAS_GMCH(dev_priv) &&
770 intel_set_memory_cxsr(dev_priv, false))
771 intel_crtc_wait_for_next_vblank(crtc);
774 * Gen2 reports pipe underruns whenever all planes are disabled.
775 * So disable underrun reporting before all the planes get disabled.
777 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
778 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
780 intel_plane_disable_arm(plane, crtc_state);
781 intel_crtc_wait_for_next_vblank(crtc);
785 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
789 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
790 plane_state->view.color_plane[0].offset, 0);
796 __intel_display_resume(struct drm_device *dev,
797 struct drm_atomic_state *state,
798 struct drm_modeset_acquire_ctx *ctx)
800 struct drm_crtc_state *crtc_state;
801 struct drm_crtc *crtc;
804 intel_modeset_setup_hw_state(dev, ctx);
805 intel_vga_redisable(to_i915(dev));
811 * We've duplicated the state, pointers to the old state are invalid.
813 * Don't attempt to use the old state until we commit the duplicated state.
815 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
817 * Force recalculation even if we restore
818 * current state. With fast modeset this may not result
819 * in a modeset when the state is compatible.
821 crtc_state->mode_changed = true;
824 /* ignore any reset values/BIOS leftovers in the WM registers */
825 if (!HAS_GMCH(to_i915(dev)))
826 to_intel_atomic_state(state)->skip_intermediate_wm = true;
828 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
830 drm_WARN_ON(dev, ret == -EDEADLK);
834 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
836 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
837 intel_has_gpu_reset(to_gt(dev_priv)));
840 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
842 struct drm_device *dev = &dev_priv->drm;
843 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
844 struct drm_atomic_state *state;
847 if (!HAS_DISPLAY(dev_priv))
850 /* reset doesn't touch the display */
851 if (!dev_priv->params.force_reset_modeset_test &&
852 !gpu_reset_clobbers_display(dev_priv))
855 /* We have a modeset vs reset deadlock, defensively unbreak it. */
856 set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
857 smp_mb__after_atomic();
858 wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
860 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
861 drm_dbg_kms(&dev_priv->drm,
862 "Modeset potentially stuck, unbreaking through wedging\n");
863 intel_gt_set_wedged(to_gt(dev_priv));
867 * Need mode_config.mutex so that we don't
868 * trample ongoing ->detect() and whatnot.
870 mutex_lock(&dev->mode_config.mutex);
871 drm_modeset_acquire_init(ctx, 0);
873 ret = drm_modeset_lock_all_ctx(dev, ctx);
877 drm_modeset_backoff(ctx);
880 * Disabling the crtcs gracefully seems nicer. Also the
881 * g33 docs say we should at least disable all the planes.
883 state = drm_atomic_helper_duplicate_state(dev, ctx);
885 ret = PTR_ERR(state);
886 drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
891 ret = drm_atomic_helper_disable_all(dev, ctx);
893 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
895 drm_atomic_state_put(state);
899 dev_priv->modeset_restore_state = state;
900 state->acquire_ctx = ctx;
903 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
905 struct drm_device *dev = &dev_priv->drm;
906 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
907 struct drm_atomic_state *state;
910 if (!HAS_DISPLAY(dev_priv))
913 /* reset doesn't touch the display */
914 if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
917 state = fetch_and_zero(&dev_priv->modeset_restore_state);
921 /* reset doesn't touch the display */
922 if (!gpu_reset_clobbers_display(dev_priv)) {
923 /* for testing only restore the display */
924 ret = __intel_display_resume(dev, state, ctx);
926 drm_err(&dev_priv->drm,
927 "Restoring old state failed with %i\n", ret);
930 * The display has been reset as well,
931 * so need a full re-initialization.
933 intel_pps_unlock_regs_wa(dev_priv);
934 intel_modeset_init_hw(dev_priv);
935 intel_init_clock_gating(dev_priv);
936 intel_hpd_init(dev_priv);
938 ret = __intel_display_resume(dev, state, ctx);
940 drm_err(&dev_priv->drm,
941 "Restoring old state failed with %i\n", ret);
943 intel_hpd_poll_disable(dev_priv);
946 drm_atomic_state_put(state);
948 drm_modeset_drop_locks(ctx);
949 drm_modeset_acquire_fini(ctx);
950 mutex_unlock(&dev->mode_config.mutex);
952 clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
955 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
957 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
958 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
959 enum pipe pipe = crtc->pipe;
962 tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
965 * Display WA #1153: icl
966 * enable hardware to bypass the alpha math
967 * and rounding for per-pixel values 00 and 0xff
969 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
971 * Display WA # 1605353570: icl
972 * Set the pixel rounding bit to 1 for allowing
973 * passthrough of Frame buffer pixels unmodified
976 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
979 * Underrun recovery must always be disabled on display 13+.
980 * DG2 chicken bit meaning is inverted compared to other platforms.
982 if (IS_DG2(dev_priv))
983 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
984 else if (DISPLAY_VER(dev_priv) >= 13)
985 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
987 /* Wa_14010547955:dg2 */
988 if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
989 tmp |= DG2_RENDER_CCSTAG_4_3_EN;
991 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
994 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
996 struct drm_crtc *crtc;
999 drm_for_each_crtc(crtc, &dev_priv->drm) {
1000 struct drm_crtc_commit *commit;
1001 spin_lock(&crtc->commit_lock);
1002 commit = list_first_entry_or_null(&crtc->commit_list,
1003 struct drm_crtc_commit, commit_entry);
1004 cleanup_done = commit ?
1005 try_wait_for_completion(&commit->cleanup_done) : true;
1006 spin_unlock(&crtc->commit_lock);
1011 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
1020 * Finds the encoder associated with the given CRTC. This can only be
1021 * used when we know that the CRTC isn't feeding multiple encoders!
1023 struct intel_encoder *
1024 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1025 const struct intel_crtc_state *crtc_state)
1027 const struct drm_connector_state *connector_state;
1028 const struct drm_connector *connector;
1029 struct intel_encoder *encoder = NULL;
1030 struct intel_crtc *master_crtc;
1031 int num_encoders = 0;
1034 master_crtc = intel_master_crtc(crtc_state);
1036 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1037 if (connector_state->crtc != &master_crtc->base)
1040 encoder = to_intel_encoder(connector_state->best_encoder);
1044 drm_WARN(encoder->base.dev, num_encoders != 1,
1045 "%d encoders for pipe %c\n",
1046 num_encoders, pipe_name(master_crtc->pipe));
1051 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
1054 i915_reg_t dslreg = PIPEDSL(pipe);
1057 temp = intel_de_read(dev_priv, dslreg);
1059 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
1060 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
1061 drm_err(&dev_priv->drm,
1062 "mode set failed: pipe %c stuck\n",
1067 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
1069 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1070 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1071 const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
1072 enum pipe pipe = crtc->pipe;
1073 int width = drm_rect_width(dst);
1074 int height = drm_rect_height(dst);
1078 if (!crtc_state->pch_pfit.enabled)
1081 /* Force use of hard-coded filter coefficients
1082 * as some pre-programmed values are broken,
1085 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
1086 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1087 PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
1089 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1091 intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
1092 intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
/*
 * hsw_enable_ips - enable IPS (Intermediate Pixel Storage) on HSW/BDW.
 * On Broadwell the enable is requested through the pcode mailbox; the
 * other path writes IPS_CTL directly and then polls for the enable bit,
 * which only latches at the next vblank.
 * NOTE(review): this excerpt has lines elided (missing braces/returns);
 * compare against the full file before relying on control flow here.
 */
1095 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
1097 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1098 struct drm_device *dev = crtc->base.dev;
1099 struct drm_i915_private *dev_priv = to_i915(dev);
/* Nothing to do when this state was not computed with IPS enabled. */
1101 if (!crtc_state->ips_enabled)
1105 * We can only enable IPS after we enable a plane and wait for a vblank
1106 * This function is called from post_plane_update, which is run after
/* IPS needs at least one non-cursor plane active. */
1109 drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
1111 if (IS_BROADWELL(dev_priv)) {
/* BDW: enable via pcode mailbox; a failure here is a driver bug. */
1112 drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
1113 IPS_ENABLE | IPS_PCODE_CONTROL));
1114 /* Quoting Art Runyan: "its not safe to expect any particular
1115 * value in IPS_CTL bit 31 after enabling IPS through the
1116 * mailbox." Moreover, the mailbox may return a bogus state,
1117 * so we need to just enable it and continue on.
/* Direct MMIO enable (presumably the non-BDW path — confirm upstream). */
1120 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE)1120 intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
1121 /* The bit only becomes 1 in the next vblank, so this wait here
1122 * is essentially intel_wait_for_vblank. If we don't have this
1123 * and don't wait for vblanks until the end of crtc_enable, then
1124 * the HW state readout code will complain that the expected
1125 * IPS_CTL value is not the one we read. */
1126 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
1127 drm_err(&dev_priv->drm,
1128 "Timed out waiting for IPS enable\n");
/*
 * hsw_disable_ips - disable IPS before touching state it depends on.
 * BDW disables via the pcode mailbox and polls IPS_CTL until the enable
 * bit clears (100ms instead of the BSpec 42ms, see comment below); the
 * direct-MMIO path writes IPS_CTL to 0 with a posting read. Finishes
 * with a vblank wait so the caller may disable the plane afterwards.
 */
1132 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
1134 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1135 struct drm_device *dev = crtc->base.dev;
1136 struct drm_i915_private *dev_priv = to_i915(dev);
/* If IPS was never enabled for this state there is nothing to undo. */
1138 if (!crtc_state->ips_enabled)
1141 if (IS_BROADWELL(dev_priv)) {
1143 snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
1145 * Wait for PCODE to finish disabling IPS. The BSpec specified
1146 * 42ms timeout value leads to occasional timeouts so use 100ms
1149 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
1150 drm_err(&dev_priv->drm,
1151 "Timed out waiting for IPS disable\n")1151 "Timed out waiting for IPS disable\n");
/* Direct MMIO disable; posting read flushes the write to the HW. */
1153 intel_de_write(dev_priv, IPS_CTL, 0);
1154 intel_de_posting_read(dev_priv, IPS_CTL);
1157 /* We need to wait for a vblank before we can disable the plane. */
1158 intel_crtc_wait_for_next_vblank(crtc);
/*
 * Switch off the legacy video overlay attached to this crtc (if any —
 * the guarding check is elided in this excerpt). The switch-off result
 * is deliberately ignored; userspace is expected to re-enable it.
 */
1161 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
1164 (void) intel_overlay_switch_off(crtc->overlay);
1166 /* Let userspace switch the overlay on again. In most cases userspace
1167 * has to recompute where to put it anyway.
/*
 * Decide whether IPS must be disabled before the plane update.
 * Returns false when IPS was already off or a full modeset will handle
 * it; returns true when the new state drops IPS, and also (HSW-only WA)
 * when the gamma LUT is about to be reprogrammed in split-gamma mode.
 */
1171 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
1172 const struct intel_crtc_state *new_crtc_state)
1174 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1175 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* IPS not currently on: nothing to disable. */
1177 if (!old_crtc_state->ips_enabled)
/* A full modeset turns everything off anyway. */
1180 if (intel_crtc_needs_modeset(new_crtc_state))
1184 * Workaround : Do not read or write the pipe palette/gamma data while
1185 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1187 * Disable IPS before we program the LUT.
1189 if (IS_HASWELL(dev_priv) &&
1190 (new_crtc_state->uapi.color_mgmt_changed ||
1191 new_crtc_state->update_pipe) &&
1192 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
/* Otherwise disable only if the new state no longer wants IPS. */
1195 return !new_crtc_state->ips_enabled;
/*
 * Decide whether IPS must be (re-)enabled after the plane update.
 * Mirrors hsw_pre_update_disable_ips(): enable when IPS was off in the
 * old state, when the HSW split-gamma LUT workaround forced a disable,
 * or forcibly on the first fastset (BDW IPS state is not readable).
 */
1198 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
1199 const struct intel_crtc_state *new_crtc_state)
1201 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1202 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* New state does not use IPS: nothing to enable. */
1204 if (!new_crtc_state->ips_enabled)
/* A full modeset enables IPS via the crtc enable path instead. */
1207 if (intel_crtc_needs_modeset(new_crtc_state))
1211 * Workaround : Do not read or write the pipe palette/gamma data while
1212 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1214 * Re-enable IPS after the LUT has been programmed.
1216 if (IS_HASWELL(dev_priv) &&
1217 (new_crtc_state->uapi.color_mgmt_changed ||
1218 new_crtc_state->update_pipe) &&
1219 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1223 * We can't read out IPS on broadwell, assume the worst and
1224 * forcibly enable IPS on the first fastset.
1226 if (new_crtc_state->update_pipe && old_crtc_state->inherited)
/* Otherwise enable only when the old state did not already have IPS. */
1229 return !old_crtc_state->ips_enabled;
/*
 * WA Display #0827 predicate: true when NV12 planes are in use on a
 * Display-version-9 (Gen9) platform; callers toggle skl_wa_827().
 */
1232 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1234 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/* No NV12 planes: the workaround is not needed. */
1236 if (!crtc_state->nv12_planes)
1239 /* WA Display #0827: Gen9:all */
1240 if (DISPLAY_VER(dev_priv) == 9)
/*
 * Wa_2006604312 predicate (ICL/EHL, display ver 11): the scaler clock
 * gating workaround is needed whenever any scaler user is active.
 */
1246 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1248 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1250 /* Wa_2006604312:icl,ehl */
1251 if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
/*
 * Wa_1604331009 predicate (ICL/JSL/EHL, display ver 11): cursor clock
 * gating workaround is needed when the cursor plane is active in an
 * HDR mode.
 */
1257 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1259 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1261 /* Wa_1604331009:icl,jsl,ehl */
1262 if (is_hdr_mode(crtc_state) &&
1263 crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1264 DISPLAY_VER(dev_priv) == 11)
/*
 * Apply/remove the async-flip + VT-d stretch-max workaround: clamp the
 * plane stretch max to x1 while async flips are enabled, restore x8
 * otherwise. Gen9 uses the SKL field; the other path uses the HSW/BDW
 * field of the same CHICKEN_PIPESL_1 register.
 */
1270 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
1271 enum pipe pipe, bool enable)
1273 if (DISPLAY_VER(i915) == 9) {
1275 * "Plane N strech max must be programmed to 11b (x1)
1276 * when Async flips are enabled on that plane."
1278 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1279 SKL_PLANE1_STRETCH_MAX_MASK,
1280 enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1282 /* Also needed on HSW/BDW albeit undocumented */
1283 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1284 HSW_PRI_STRETCH_MAX_MASK,
1285 enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
/*
 * Predicate for the workaround above: async flip requested, VT-d
 * active, and platform is Gen9 display, BDW or HSW.
 */
1289 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1291 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1293 return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
1294 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
/*
 * True when planes go from none-active (or a full modeset) to some
 * active planes in the new state.
 */
1297 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1298 const struct intel_crtc_state *new_crtc_state)
1300 return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1301 new_crtc_state->active_planes;
/*
 * True when planes go from some-active to none (or a full modeset
 * will disable them).
 */
1304 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1305 const struct intel_crtc_state *new_crtc_state)
1307 return old_crtc_state->active_planes &&
1308 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
/*
 * Post-plane-update housekeeping for one crtc: flush frontbuffer bits,
 * program post-update watermarks, re-enable IPS if appropriate, run
 * FBC/DRRS post hooks, and de-assert each workaround (async-flip VT-d,
 * WA #0827 NV12, scaler-clk, cursor-clk) that the new state no longer
 * needs. Counterpart of intel_pre_plane_update().
 */
1311 static void intel_post_plane_update(struct intel_atomic_state *state,
1312 struct intel_crtc *crtc)
1314 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1315 const struct intel_crtc_state *old_crtc_state =
1316 intel_atomic_get_old_crtc_state(state, crtc);
1317 const struct intel_crtc_state *new_crtc_state =
1318 intel_atomic_get_new_crtc_state(state, crtc);
1319 enum pipe pipe = crtc->pipe;
1321 intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
/* Post-vblank watermark update, only while the pipe stays active. */
1323 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
1324 intel_update_watermarks(dev_priv);
1326 if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
1327 hsw_enable_ips(new_crtc_state);
1329 intel_fbc_post_update(state, crtc);
1330 intel_drrs_page_flip(state, crtc);
/* Drop workarounds that were needed by the old state but not the new. */
1332 if (needs_async_flip_vtd_wa(old_crtc_state) &&
1333 !needs_async_flip_vtd_wa(new_crtc_state))
1334 intel_async_flip_vtd_wa(dev_priv, pipe, false);
1336 if (needs_nv12_wa(old_crtc_state) &&
1337 !needs_nv12_wa(new_crtc_state))
1338 skl_wa_827(dev_priv, pipe, false);
1340 if (needs_scalerclk_wa(old_crtc_state) &&
1341 !needs_scalerclk_wa(new_crtc_state))
1342 icl_wa_scalerclkgating(dev_priv, pipe, false);
1344 if (needs_cursorclk_wa(old_crtc_state) &&
1345 !needs_cursorclk_wa(new_crtc_state))
1346 icl_wa_cursorclkgating(dev_priv, pipe, false);
/*
 * Arm the per-plane flip-done interrupt for every plane on this crtc
 * that is being updated with an async flip in the new state.
 */
1350 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1351 struct intel_crtc *crtc)
1353 const struct intel_crtc_state *crtc_state =
1354 intel_atomic_get_new_crtc_state(state, crtc);
1355 u8 update_planes = crtc_state->update_planes;
1356 const struct intel_plane_state *plane_state;
1357 struct intel_plane *plane;
1360 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1361 if (plane->enable_flip_done &&
1362 plane->pipe == crtc->pipe &&
1363 update_planes & BIT(plane->id) &&
1364 plane_state->do_async_flip)
1365 plane->enable_flip_done(plane);
/*
 * Disarm the per-plane flip-done interrupt; exact mirror of
 * intel_crtc_enable_flip_done() with the same plane filter.
 */
1369 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1370 struct intel_crtc *crtc)
1372 const struct intel_crtc_state *crtc_state =
1373 intel_atomic_get_new_crtc_state(state, crtc);
1374 u8 update_planes = crtc_state->update_planes;
1375 const struct intel_plane_state *plane_state;
1376 struct intel_plane *plane;
1379 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1380 if (plane->disable_flip_done &&
1381 plane->pipe == crtc->pipe &&
1382 update_planes & BIT(plane->id) &&
1383 plane_state->do_async_flip)
1384 plane->disable_flip_done(plane);
/*
 * WA for planes whose async-flip enable bit is double buffered and only
 * latched at vblank start: re-program each affected plane with its OLD
 * state but async flip cleared, then wait one vblank so the bit is
 * guaranteed to have latched before the real (sync) update proceeds.
 */
1388 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1389 struct intel_crtc *crtc)
1391 const struct intel_crtc_state *old_crtc_state =
1392 intel_atomic_get_old_crtc_state(state, crtc);
1393 const struct intel_crtc_state *new_crtc_state =
1394 intel_atomic_get_new_crtc_state(state, crtc);
1395 u8 update_planes = new_crtc_state->update_planes;
1396 const struct intel_plane_state *old_plane_state;
1397 struct intel_plane *plane;
1398 bool need_vbl_wait = false;
1401 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1402 if (plane->need_async_flip_disable_wa &&
1403 plane->pipe == crtc->pipe &&
1404 update_planes & BIT(plane->id)) {
1406 * Apart from the async flip bit we want to
1407 * preserve the old state for the plane.
1409 plane->async_flip(plane, old_crtc_state,
1410 old_plane_state, false);
1411 need_vbl_wait = true;
/* Vblank wait is presumably guarded by need_vbl_wait (guard elided here). */
1416 intel_crtc_wait_for_next_vblank(crtc);
/*
 * Pre-plane-update sequence for one crtc. Order matters: PSR first,
 * IPS disable, FBC pre-update (may need a vblank), assert workarounds
 * the new state needs, CxSR/LP-WM disables with their vblank waits,
 * intermediate watermark programming for non-modeset updates, gen2
 * underrun suppression, and finally the async-flip-disable WA.
 * Counterpart of intel_post_plane_update().
 */
1419 static void intel_pre_plane_update(struct intel_atomic_state *state,
1420 struct intel_crtc *crtc)
1422 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1423 const struct intel_crtc_state *old_crtc_state =
1424 intel_atomic_get_old_crtc_state(state, crtc);
1425 const struct intel_crtc_state *new_crtc_state =
1426 intel_atomic_get_new_crtc_state(state, crtc);
1427 enum pipe pipe = crtc->pipe;
1429 intel_psr_pre_plane_update(state, crtc);
1431 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
1432 hsw_disable_ips(old_crtc_state);
/* FBC pre-update may request a vblank wait before planes change. */
1434 if (intel_fbc_pre_update(state, crtc))
1435 intel_crtc_wait_for_next_vblank(crtc);
/* Assert workarounds that the new state needs but the old one didn't. */
1437 if (!needs_async_flip_vtd_wa(old_crtc_state) &&
1438 needs_async_flip_vtd_wa(new_crtc_state))
1439 intel_async_flip_vtd_wa(dev_priv, pipe, true);
1441 /* Display WA 827 */
1442 if (!needs_nv12_wa(old_crtc_state) &&
1443 needs_nv12_wa(new_crtc_state))
1444 skl_wa_827(dev_priv, pipe, true);
1446 /* Wa_2006604312:icl,ehl */
1447 if (!needs_scalerclk_wa(old_crtc_state) &&
1448 needs_scalerclk_wa(new_crtc_state))
1449 icl_wa_scalerclkgating(dev_priv, pipe, true);
1451 /* Wa_1604331009:icl,jsl,ehl */
1452 if (!needs_cursorclk_wa(old_crtc_state) &&
1453 needs_cursorclk_wa(new_crtc_state))
1454 icl_wa_cursorclkgating(dev_priv, pipe, true);
1457 * Vblank time updates from the shadow to live plane control register
1458 * are blocked if the memory self-refresh mode is active at that
1459 * moment. So to make sure the plane gets truly disabled, disable
1460 * first the self-refresh mode. The self-refresh enable bit in turn
1461 * will be checked/applied by the HW only at the next frame start
1462 * event which is after the vblank start event, so we need to have a
1463 * wait-for-vblank between disabling the plane and the pipe.
1465 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
1466 new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
1467 intel_crtc_wait_for_next_vblank(crtc);
1470 * IVB workaround: must disable low power watermarks for at least
1471 * one frame before enabling scaling. LP watermarks can be re-enabled
1472 * when scaling is disabled.
1474 * WaCxSRDisabledForSpriteScaling:ivb
1476 if (old_crtc_state->hw.active &&
1477 new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
1478 intel_crtc_wait_for_next_vblank(crtc);
1481 * If we're doing a modeset we don't need to do any
1482 * pre-vblank watermark programming here.
1484 if (!intel_crtc_needs_modeset(new_crtc_state)) {
1486 * For platforms that support atomic watermarks, program the
1487 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
1488 * will be the intermediate values that are safe for both pre- and
1489 * post- vblank; when vblank happens, the 'active' values will be set
1490 * to the final 'target' values and we'll do this again to get the
1491 * optimal watermarks. For gen9+ platforms, the values we program here
1492 * will be the final target values which will get automatically latched
1493 * at vblank time; no further programming will be necessary.
1495 * If a platform hasn't been transitioned to atomic watermarks yet,
1496 * we'll continue to update watermarks the old way, if flags tell
1499 if (!intel_initial_watermarks(state, crtc))
1500 if (new_crtc_state->update_wm_pre)
1501 intel_update_watermarks(dev_priv);
1505 * Gen2 reports pipe underruns whenever all planes are disabled.
1506 * So disable underrun reporting before all the planes get disabled.
1508 * We do this after .initial_watermarks() so that we have a
1509 * chance of catching underruns with the intermediate watermarks
1510 * vs. the old plane configuration.
1512 if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
1513 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1516 * WA for platforms where async address update enable bit
1517 * is double buffered and only latched at start of vblank.
1519 if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
1520 intel_crtc_async_flip_disable_wa(state, crtc);
/*
 * Disable (arm the disable of) every plane on this crtc selected by
 * update_planes, after switching off the legacy overlay, and flush the
 * frontbuffer bits of planes that were visible.
 */
1523 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1524 struct intel_crtc *crtc)
1526 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1527 const struct intel_crtc_state *new_crtc_state =
1528 intel_atomic_get_new_crtc_state(state, crtc);
1529 unsigned int update_mask = new_crtc_state->update_planes;
1530 const struct intel_plane_state *old_plane_state;
1531 struct intel_plane *plane;
1532 unsigned fb_bits = 0;
1535 intel_crtc_dpms_overlay_disable(crtc);
1537 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
/* Skip planes belonging to other pipes or not selected for update. */
1538 if (crtc->pipe != plane->pipe ||
1539 !(update_mask & BIT(plane->id)))
1542 intel_plane_disable_arm(plane, new_crtc_state);
/* Collect frontbuffer bits only for planes that were visible. */
1544 if (old_plane_state->uapi.visible)
1545 fb_bits |= plane->frontbuffer_bit;
1548 intel_frontbuffer_flip(dev_priv, fb_bits);
1552 * intel_connector_primary_encoder - get the primary encoder for a connector
1553 * @connector: connector for which to return the encoder
1555 * Returns the primary encoder for a connector. There is a 1:1 mapping from
1556 * all connectors to their encoder, except for DP-MST connectors which have
1557 * both a virtual and a primary encoder. These DP-MST primary encoders can be
1558 * pointed to by as many DP-MST connectors as there are pipes.
/* See the kerneldoc comment above: DP-MST connectors resolve to their
 * digital port's encoder, everything else to the attached encoder. */
1560 static struct intel_encoder *
1561 intel_connector_primary_encoder(struct intel_connector *connector)
1563 struct intel_encoder *encoder;
1565 if (connector->mst_port)
1566 return &dp_to_dig_port(connector->mst_port)->base;
1568 encoder = intel_attached_encoder(connector);
/* Every non-MST connector is expected to have an attached encoder. */
1569 drm_WARN_ON(connector->base.dev, !encoder);
/*
 * Pre-commit encoder preparation. First, for fastsets, copy the old
 * DPLL state into the new crtc states (keeps TypeC DPLL state coherent
 * after non-blocking commits); then, for modesets only, invoke each
 * affected connector's primary-encoder ->update_prepare() hook.
 */
1574 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1576 struct drm_i915_private *i915 = to_i915(state->base.dev);
1577 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1578 struct intel_crtc *crtc;
1579 struct drm_connector_state *new_conn_state;
1580 struct drm_connector *connector;
1584 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1585 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1587 if (i915->dpll.mgr) {
1588 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
/* Modeset crtcs recompute their DPLL state; skip them here. */
1589 if (intel_crtc_needs_modeset(new_crtc_state))
1592 new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1593 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
/* The per-encoder hooks below only run for modesets. */
1597 if (!state->modeset)
1600 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1602 struct intel_connector *intel_connector;
1603 struct intel_encoder *encoder;
1604 struct intel_crtc *crtc;
1606 if (!intel_connector_needs_modeset(state, connector))
1609 intel_connector = to_intel_connector(connector);
1610 encoder = intel_connector_primary_encoder(intel_connector);
1611 if (!encoder->update_prepare)
1614 crtc = new_conn_state->crtc ?
1615 to_intel_crtc(new_conn_state->crtc) : NULL;
1616 encoder->update_prepare(state, encoder, crtc);
/*
 * Post-commit counterpart of intel_encoders_update_prepare(): for
 * modesets, call ->update_complete() on the primary encoder of every
 * connector that needed a modeset.
 */
1620 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1622 struct drm_connector_state *new_conn_state;
1623 struct drm_connector *connector;
1626 if (!state->modeset)
1629 for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1631 struct intel_connector *intel_connector;
1632 struct intel_encoder *encoder;
1633 struct intel_crtc *crtc;
1635 if (!intel_connector_needs_modeset(state, connector))
1638 intel_connector = to_intel_connector(connector);
1639 encoder = intel_connector_primary_encoder(intel_connector);
1640 if (!encoder->update_complete)
1643 crtc = new_conn_state->crtc ?
1644 to_intel_crtc(new_conn_state->crtc) : NULL;
1645 encoder->update_complete(state, encoder, crtc);
/*
 * Call ->pre_pll_enable() on every encoder whose connector is bound to
 * this crtc in the new state. Same walk pattern as the other
 * intel_encoders_*() helpers below.
 */
1649 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1650 struct intel_crtc *crtc)
1652 const struct intel_crtc_state *crtc_state =
1653 intel_atomic_get_new_crtc_state(state, crtc);
1654 const struct drm_connector_state *conn_state;
1655 struct drm_connector *conn;
1658 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1659 struct intel_encoder *encoder =
1660 to_intel_encoder(conn_state->best_encoder);
/* Only connectors driven by this crtc. */
1662 if (conn_state->crtc != &crtc->base)
1665 if (encoder->pre_pll_enable)
1666 encoder->pre_pll_enable(state, encoder,
1667 crtc_state, conn_state);
/*
 * Call ->pre_enable() on every encoder bound to this crtc in the new
 * state (runs after PLL enable, before the pipe is enabled).
 */
1671 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1672 struct intel_crtc *crtc)
1674 const struct intel_crtc_state *crtc_state =
1675 intel_atomic_get_new_crtc_state(state, crtc);
1676 const struct drm_connector_state *conn_state;
1677 struct drm_connector *conn;
1680 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1681 struct intel_encoder *encoder =
1682 to_intel_encoder(conn_state->best_encoder);
1684 if (conn_state->crtc != &crtc->base)
1687 if (encoder->pre_enable)
1688 encoder->pre_enable(state, encoder,
1689 crtc_state, conn_state);
/*
 * Call ->enable() on every encoder bound to this crtc and notify the
 * ACPI OpRegion that the encoder is now active.
 */
1693 static void intel_encoders_enable(struct intel_atomic_state *state,
1694 struct intel_crtc *crtc)
1696 const struct intel_crtc_state *crtc_state =
1697 intel_atomic_get_new_crtc_state(state, crtc);
1698 const struct drm_connector_state *conn_state;
1699 struct drm_connector *conn;
1702 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1703 struct intel_encoder *encoder =
1704 to_intel_encoder(conn_state->best_encoder);
1706 if (conn_state->crtc != &crtc->base)
1709 if (encoder->enable)
1710 encoder->enable(state, encoder,
1711 crtc_state, conn_state);
/* OpRegion notification happens even if the encoder has no ->enable. */
1712 intel_opregion_notify_encoder(encoder, true);
/*
 * Mirror of intel_encoders_enable(): walk the OLD connector states,
 * notify the OpRegion of the disable, then call ->disable().
 */
1716 static void intel_encoders_disable(struct intel_atomic_state *state,
1717 struct intel_crtc *crtc)
1719 const struct intel_crtc_state *old_crtc_state =
1720 intel_atomic_get_old_crtc_state(state, crtc);
1721 const struct drm_connector_state *old_conn_state;
1722 struct drm_connector *conn;
1725 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1726 struct intel_encoder *encoder =
1727 to_intel_encoder(old_conn_state->best_encoder);
1729 if (old_conn_state->crtc != &crtc->base)
/* Notify OpRegion before the encoder hook, opposite of enable order. */
1732 intel_opregion_notify_encoder(encoder, false);
1733 if (encoder->disable)
1734 encoder->disable(state, encoder,
1735 old_crtc_state, old_conn_state);
/*
 * Call ->post_disable() on every encoder that was bound to this crtc
 * in the old state (runs after the pipe has been shut down).
 */
1739 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1740 struct intel_crtc *crtc)
1742 const struct intel_crtc_state *old_crtc_state =
1743 intel_atomic_get_old_crtc_state(state, crtc);
1744 const struct drm_connector_state *old_conn_state;
1745 struct drm_connector *conn;
1748 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1749 struct intel_encoder *encoder =
1750 to_intel_encoder(old_conn_state->best_encoder);
1752 if (old_conn_state->crtc != &crtc->base)
1755 if (encoder->post_disable)
1756 encoder->post_disable(state, encoder,
1757 old_crtc_state, old_conn_state);
/*
 * Call ->post_pll_disable() on every encoder that was bound to this
 * crtc in the old state (runs after the PLL has been disabled).
 */
1761 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1762 struct intel_crtc *crtc)
1764 const struct intel_crtc_state *old_crtc_state =
1765 intel_atomic_get_old_crtc_state(state, crtc);
1766 const struct drm_connector_state *old_conn_state;
1767 struct drm_connector *conn;
1770 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1771 struct intel_encoder *encoder =
1772 to_intel_encoder(old_conn_state->best_encoder);
1774 if (old_conn_state->crtc != &crtc->base)
1777 if (encoder->post_pll_disable)
1778 encoder->post_pll_disable(state, encoder,
1779 old_crtc_state, old_conn_state);
/*
 * Call ->update_pipe() on every encoder bound to this crtc in the new
 * state (fastset path: pipe stays up, encoder refreshes its config).
 */
1783 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1784 struct intel_crtc *crtc)
1786 const struct intel_crtc_state *crtc_state =
1787 intel_atomic_get_new_crtc_state(state, crtc);
1788 const struct drm_connector_state *conn_state;
1789 struct drm_connector *conn;
1792 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1793 struct intel_encoder *encoder =
1794 to_intel_encoder(conn_state->best_encoder);
1796 if (conn_state->crtc != &crtc->base)
1799 if (encoder->update_pipe)
1800 encoder->update_pipe(state, encoder,
1801 crtc_state, conn_state);
/*
 * Arm the disable of the crtc's primary plane. Used by the enable
 * paths to make the pipe-bottom-color gamma/csc config take effect.
 */
1805 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1807 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1808 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1810 plane->disable_arm(plane, crtc_state);
/*
 * ILK-family crtc enable sequence: suppress spurious FIFO underruns,
 * program M/N values, transcoder timings, pipe source size and
 * pipeconf, run the encoder pre-enable hooks, optional PCH/FDI
 * pre-enable, pfit, LUTs, watermarks, transcoder enable, PCH enable,
 * vblank on, encoder enable, then double vblank wait before re-arming
 * underrun reporting. Ordering here is hardware-mandated — do not
 * reorder.
 */
1813 static void ilk_crtc_enable(struct intel_atomic_state *state,
1814 struct intel_crtc *crtc)
1816 const struct intel_crtc_state *new_crtc_state =
1817 intel_atomic_get_new_crtc_state(state, crtc);
1818 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1819 enum pipe pipe = crtc->pipe;
/* Enabling an already-active crtc is a driver bug. */
1821 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1825 * Sometimes spurious CPU pipe underruns happen during FDI
1826 * training, at least with VGA+HDMI cloning. Suppress them.
1828 * On ILK we get an occasional spurious CPU pipe underruns
1829 * between eDP port A enable and vdd enable. Also PCH port
1830 * enable seems to result in the occasional CPU pipe underrun.
1832 * Spurious PCH underruns also occur during PCH enabling.
1834 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1835 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
/* DP link M/N go to the PCH or CPU transcoder depending on routing. */
1837 if (intel_crtc_has_dp_encoder(new_crtc_state)) {
1838 if (new_crtc_state->has_pch_encoder)
1839 intel_pch_transcoder_set_m_n(new_crtc_state,
1840 &new_crtc_state->dp_m_n);
1842 intel_cpu_transcoder_set_m_n(new_crtc_state,
1843 &new_crtc_state->dp_m_n,
1844 &new_crtc_state->dp_m2_n2);
1847 intel_set_transcoder_timings(new_crtc_state);
1848 intel_set_pipe_src_size(new_crtc_state);
/* FDI M/N for the CPU transcoder when a PCH encoder is in use. */
1850 if (new_crtc_state->has_pch_encoder)
1851 intel_cpu_transcoder_set_m_n(new_crtc_state,
1852 &new_crtc_state->fdi_m_n, NULL);
1854 ilk_set_pipeconf(new_crtc_state);
1856 crtc->active = true;
1858 intel_encoders_pre_enable(state, crtc);
1860 if (new_crtc_state->has_pch_encoder) {
1861 ilk_pch_pre_enable(state, crtc);
/* Without a PCH encoder, FDI must be verified off (else branch elided). */
1863 assert_fdi_tx_disabled(dev_priv, pipe);
1864 assert_fdi_rx_disabled(dev_priv, pipe);
1867 ilk_pfit_enable(new_crtc_state);
1870 * On ILK+ LUT must be loaded before the pipe is running but with
1873 intel_color_load_luts(new_crtc_state);
1874 intel_color_commit(new_crtc_state);
1875 /* update DSPCNTR to configure gamma for pipe bottom color */
1876 intel_disable_primary_plane(new_crtc_state);
1878 intel_initial_watermarks(state, crtc);
1879 intel_enable_transcoder(new_crtc_state);
1881 if (new_crtc_state->has_pch_encoder)
1882 ilk_pch_enable(state, crtc);
1884 intel_crtc_vblank_on(new_crtc_state);
1886 intel_encoders_enable(state, crtc);
1888 if (HAS_PCH_CPT(dev_priv))
1889 cpt_verify_modeset(dev_priv, pipe);
1892 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1893 * And a second vblank wait is needed at least on ILK with
1894 * some interlaced HDMI modes. Let's do the double wait always
1895 * in case there are more corner cases we don't know about.
1897 if (new_crtc_state->has_pch_encoder) {
1898 intel_crtc_wait_for_next_vblank(crtc);
1899 intel_crtc_wait_for_next_vblank(crtc);
/* Re-arm underrun reporting now that the pipe is stable. */
1901 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1902 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1905 /* IPS only exists on ULT machines and is tied to pipe A. */
1906 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
/* True only when the platform has IPS and this crtc drives pipe A. */
1908 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
/*
 * Display WA #1180 (GLK): toggle the pipe-scaler clock gating disable
 * bits in CLKGATE_DIS_PSL. The read-modify-write of 'val' with 'mask'
 * based on 'apply' is elided in this excerpt.
 */
1911 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1912 enum pipe pipe, bool apply)
1914 u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1915 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1922 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
/*
 * Program the pipe's MBUS DBOX credit configuration. ADL-P picks the
 * A-credit count based on joined-MBUS mode (Wa_22010947358); display
 * ver 12+ and older platforms use different B/BW credit values.
 */
1925 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
1927 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1928 enum pipe pipe = crtc->pipe;
1931 /* Wa_22010947358:adl-p */
1932 if (IS_ALDERLAKE_P(dev_priv))
1933 val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
1935 val = MBUS_DBOX_A_CREDIT(2);
1937 if (DISPLAY_VER(dev_priv) >= 12) {
1938 val |= MBUS_DBOX_BW_CREDIT(2);
1939 val |= MBUS_DBOX_B_CREDIT(12);
1941 val |= MBUS_DBOX_BW_CREDIT(1);
1942 val |= MBUS_DBOX_B_CREDIT(8);
1945 intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
/*
 * Write the per-pipe line-time watermark register with both the normal
 * and IPS linetime values from the crtc state.
 */
1948 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1950 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1951 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1953 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1954 HSW_LINETIME(crtc_state->linetime) |
1955 HSW_IPS_LINETIME(crtc_state->ips_linetime));
/*
 * Program the frame start delay field of the transcoder's CHICKEN_TRANS
 * register from the driver-wide framestart_delay value (stored 1-based,
 * hardware field is 0-based — hence the -1).
 */
1958 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1960 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1961 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1962 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1965 val = intel_de_read(dev_priv, reg);
1966 val &= ~HSW_FRAME_START_DELAY_MASK;
1967 val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
1968 intel_de_write(dev_priv, reg, val);
/*
 * Bigjoiner pre-enable: locate the master crtc and its encoder, run the
 * master's encoder pre-pll-enable/pre-enable hooks when called for the
 * slave, and enable the shared DPLL if this state has one.
 * NOTE(review): the guards here run the master hooks when
 * crtc_state->bigjoiner_slave is set — lines appear elided in this
 * excerpt; verify the exact conditions against the full file.
 */
1971 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1972 const struct intel_crtc_state *crtc_state)
1974 struct intel_crtc_state *master_crtc_state;
1975 struct intel_crtc *master_crtc;
1976 struct drm_connector_state *conn_state;
1977 struct drm_connector *conn;
1978 struct intel_encoder *encoder = NULL;
1981 master_crtc = intel_master_crtc(crtc_state);
1982 master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
/* Find the encoder driven by the master crtc. */
1984 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1985 if (conn_state->crtc != &master_crtc->base)
1988 encoder = to_intel_encoder(conn_state->best_encoder);
1993 * Enable sequence steps 1-7 on bigjoiner master
1995 if (crtc_state->bigjoiner_slave)
1996 intel_encoders_pre_pll_enable(state, master_crtc);
1998 if (crtc_state->shared_dpll)
1999 intel_enable_shared_dpll(crtc_state);
2001 if (crtc_state->bigjoiner_slave)
2002 intel_encoders_pre_enable(state, master_crtc);
/*
 * Program the CPU transcoder for HSW+: timings, pixel multiplier (not
 * applicable to the eDP transcoder), FDI M/N when a PCH encoder is in
 * use, frame start delay, and finally the transcoder config register.
 */
2005 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
2007 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2008 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2009 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2011 intel_set_transcoder_timings(crtc_state);
/* PIPE_MULT holds the multiplier minus one; eDP transcoder has none. */
2013 if (cpu_transcoder != TRANSCODER_EDP)
2014 intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
2015 crtc_state->pixel_multiplier - 1);
2017 if (crtc_state->has_pch_encoder)
2018 intel_cpu_transcoder_set_m_n(crtc_state,
2019 &crtc_state->fdi_m_n, NULL);
2021 hsw_set_frame_start_delay(crtc_state);
2023 hsw_set_transconf(crtc_state);
/*
 * HSW+ crtc enable sequence: encoder pre-pll/pre-enable (or the
 * bigjoiner variant), DSC, pipe source size, pipemisc, CPU transcoder
 * config, GLK scaler clock-gating WA, pfit, LUTs, linetime WM, pipe
 * chicken bits, watermarks, MBUS credits, vblank on and encoder
 * enable, finishing with the HSW workaround-pipe double vblank wait.
 * Hardware-mandated ordering — do not reorder.
 */
2026 static void hsw_crtc_enable(struct intel_atomic_state *state,
2027 struct intel_crtc *crtc)
2029 const struct intel_crtc_state *new_crtc_state =
2030 intel_atomic_get_new_crtc_state(state, crtc);
2031 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2032 enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
2033 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
2034 bool psl_clkgate_wa;
/* Enabling an already-active crtc is a driver bug. */
2036 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
/* Non-bigjoiner: ordinary encoder/DPLL bring-up on this crtc. */
2039 if (!new_crtc_state->bigjoiner) {
2040 intel_encoders_pre_pll_enable(state, crtc);
2042 if (new_crtc_state->shared_dpll)
2043 intel_enable_shared_dpll(new_crtc_state);
2045 intel_encoders_pre_enable(state, crtc);
/* Bigjoiner path runs the hooks on the master crtc instead. */
2047 icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
2050 intel_dsc_enable(new_crtc_state);
2052 if (DISPLAY_VER(dev_priv) >= 13)
2053 intel_uncompressed_joiner_enable(new_crtc_state);
2055 intel_set_pipe_src_size(new_crtc_state);
2056 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
2057 bdw_set_pipemisc(new_crtc_state);
/* DSI transcoders and bigjoiner slaves skip CPU transcoder setup. */
2059 if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder))
2060 hsw_configure_cpu_transcoder(new_crtc_state);
2062 crtc->active = true;
2064 /* Display WA #1180: WaDisableScalarClockGating: glk */
2065 psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
2066 new_crtc_state->pch_pfit.enabled;
2068 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
/* Gen9+ uses the SKL scaler-based pfit, older the ILK pfit. */
2070 if (DISPLAY_VER(dev_priv) >= 9)
2071 skl_pfit_enable(new_crtc_state);
2073 ilk_pfit_enable(new_crtc_state);
2076 * On ILK+ LUT must be loaded before the pipe is running but with
2079 intel_color_load_luts(new_crtc_state);
2080 intel_color_commit(new_crtc_state);
2081 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
2082 if (DISPLAY_VER(dev_priv) < 9)
2083 intel_disable_primary_plane(new_crtc_state);
2085 hsw_set_linetime_wm(new_crtc_state);
2087 if (DISPLAY_VER(dev_priv) >= 11)
2088 icl_set_pipe_chicken(new_crtc_state);
2090 intel_initial_watermarks(state, crtc);
2092 if (DISPLAY_VER(dev_priv) >= 11) {
2093 const struct intel_dbuf_state *dbuf_state =
2094 intel_atomic_get_new_dbuf_state(state);
2096 icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
2099 if (new_crtc_state->bigjoiner_slave)
2100 intel_crtc_vblank_on(new_crtc_state);
2102 intel_encoders_enable(state, crtc);
/* Undo the GLK WA after one vblank with the scaler running. */
2104 if (psl_clkgate_wa) {
2105 intel_crtc_wait_for_next_vblank(crtc);
2106 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
2109 /* If we change the relative order between pipe/planes enabling, we need
2110 * to change the workaround. */
2111 hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
2112 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
2113 struct intel_crtc *wa_crtc;
2115 wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
2117 intel_crtc_wait_for_next_vblank(wa_crtc);
2118 intel_crtc_wait_for_next_vblank(wa_crtc);
/*
 * Disable the ILK-style panel fitter by zeroing its control, window
 * position and window size registers — but only if it was enabled, to
 * avoid upsetting the power well on Haswell (see comment below).
 */
2122 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2124 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2125 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2126 enum pipe pipe = crtc->pipe;
2128 /* To avoid upsetting the power well on haswell only disable the pfit if
2129 * it's in use. The hw state code will make sure we get this right. */
2130 if (!old_crtc_state->pch_pfit.enabled)
2133 intel_de_write(dev_priv, PF_CTL(pipe), 0);
2134 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
2135 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
/*
 * ILK-family crtc disable: suppress spurious underruns, encoder
 * disable, vblank off, transcoder disable, pfit disable, PCH disable /
 * encoder post-disable / PCH post-disable, then re-arm underrun
 * reporting. Reverse order of ilk_crtc_enable().
 */
2138 static void ilk_crtc_disable(struct intel_atomic_state *state,
2139 struct intel_crtc *crtc)
2141 const struct intel_crtc_state *old_crtc_state =
2142 intel_atomic_get_old_crtc_state(state, crtc);
2143 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2144 enum pipe pipe = crtc->pipe;
2147 * Sometimes spurious CPU pipe underruns happen when the
2148 * pipe is already disabled, but FDI RX/TX is still enabled.
2149 * Happens at least with VGA+HDMI cloning. Suppress them.
2151 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2152 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2154 intel_encoders_disable(state, crtc);
2156 intel_crtc_vblank_off(old_crtc_state);
2158 intel_disable_transcoder(old_crtc_state);
2160 ilk_pfit_disable(old_crtc_state);
2162 if (old_crtc_state->has_pch_encoder)
2163 ilk_pch_disable(state, crtc);
2165 intel_encoders_post_disable(state, crtc);
2167 if (old_crtc_state->has_pch_encoder)
2168 ilk_pch_post_disable(state, crtc);
/* Pipe is fully down: safe to re-arm underrun reporting. */
2170 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2171 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/*
 * HSW+ crtc disable: only runs the encoder disable hooks here; the rest
 * of the teardown is handled elsewhere. Bigjoiner slaves skip the encoder
 * hooks, which are driven from the master pipe.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	if (!old_crtc_state->bigjoiner_slave) {
		intel_encoders_disable(state, crtc);
		intel_encoders_post_disable(state, crtc);
	}
}
/* Program the GMCH panel fitter from precomputed state (no-op if disabled). */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be written before the control/enable register. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
2215 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2217 if (phy == PHY_NONE)
2219 else if (IS_DG2(dev_priv))
2221 * DG2 outputs labelled as "combo PHY" in the bspec use
2222 * SNPS PHYs with completely different programming,
2223 * hence we always return false here.
2226 else if (IS_ALDERLAKE_S(dev_priv))
2227 return phy <= PHY_E;
2228 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2229 return phy <= PHY_D;
2230 else if (IS_JSL_EHL(dev_priv))
2231 return phy <= PHY_C;
2232 else if (DISPLAY_VER(dev_priv) >= 11)
2233 return phy <= PHY_B;
2238 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2240 if (IS_DG2(dev_priv))
2241 /* DG2's "TC1" output uses a SNPS PHY */
2243 else if (IS_ALDERLAKE_P(dev_priv))
2244 return phy >= PHY_F && phy <= PHY_I;
2245 else if (IS_TIGERLAKE(dev_priv))
2246 return phy >= PHY_D && phy <= PHY_I;
2247 else if (IS_ICELAKE(dev_priv))
2248 return phy >= PHY_C && phy <= PHY_F;
2253 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2255 if (phy == PHY_NONE)
2257 else if (IS_DG2(dev_priv))
2259 * All four "combo" ports and the TC1 port (PHY E) use
2262 return phy <= PHY_E;
2267 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2269 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2270 return PHY_D + port - PORT_D_XELPD;
2271 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2272 return PHY_F + port - PORT_TC1;
2273 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2274 return PHY_B + port - PORT_TC1;
2275 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2276 return PHY_C + port - PORT_TC1;
2277 else if (IS_JSL_EHL(i915) && port == PORT_D)
2280 return PHY_A + port - PORT_A;
2283 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2285 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2286 return TC_PORT_NONE;
2288 if (DISPLAY_VER(dev_priv) >= 12)
2289 return TC_PORT_1 + port - PORT_TC1;
2291 return TC_PORT_1 + port - PORT_C;
/*
 * Map a DDI port to the power domain covering its lanes.
 * NOTE(review): the case labels were lost in extraction and restored
 * from the returned PORT_DDI_*_LANES domains — confirm against upstream.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
/*
 * Map a digital port's AUX channel to the power domain needed for AUX
 * transactions, using the TBT variant of the domain when the port is in
 * Thunderbolt alt mode.
 * NOTE(review): the TBT switch case labels were lost in extraction and
 * restored from the returned AUX_*_TBT domains — confirm against upstream.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
/*
 * Converts aux_ch to power_domain without caring about TBT ports for that use
 * intel_aux_power_domain()
 *
 * NOTE(review): the case labels were lost in extraction and restored from
 * the returned AUX_* domains — confirm against upstream.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
/*
 * Compute the bitmask of power domains this crtc needs in its current
 * state: pipe, transcoder, optional panel fitter, each enabled encoder's
 * domain, plus audio/DPLL/DSC domains where applicable. Returns 0 when
 * the crtc is inactive.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));

	return mask;
}
/*
 * Grab references for power domains newly needed by @crtc_state and
 * return the mask of domains no longer needed, which the caller must
 * release after the modeset (see modeset_put_crtc_power_domains()).
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	domains = get_crtc_power_domains(crtc_state);

	new_domains = domains & ~crtc->enabled_power_domains.mask;
	old_domains = crtc->enabled_power_domains.mask & ~domains;

	for_each_power_domain(domain, new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);

	return old_domains;
}
2439 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2442 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2443 &crtc->enabled_power_domains,
/*
 * Full modeset enable sequence for VLV/CHV. The ordering below follows
 * the hardware-mandated modeset sequence and must not be rearranged.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->dp_m_n,
					     &new_crtc_state->dp_m2_n2);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
/*
 * Full modeset enable sequence for gen2-4 GMCH platforms. The ordering
 * follows the hardware-mandated modeset sequence and must not be
 * rearranged.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->dp_m_n,
					     &new_crtc_state->dp_m2_n2);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* gen2 has no underrun reporting */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}
/* Disable the GMCH panel fitter (no-op if it was not in use). */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	/* The pfit may only be touched while the pipe is off. */
	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
/*
 * Full modeset disable sequence for gen2-4/VLV/CHV. Ordering is
 * hardware-mandated and must not be rearranged.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI drives its own PLL teardown; skip PLL disable in that case. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* gen2 has no underrun reporting */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->wm_disp->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
/*
 * Force a crtc off outside of a normal atomic commit (used when
 * sanitizing inherited hardware state). Disables planes and the crtc,
 * then scrubs all the software state — uapi state, derived hw state,
 * encoder links, watermarks, DPLL, power domains and the global
 * cdclk/dbuf/bandwidth bookkeeping for this pipe.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off every visible plane before the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(dev_priv);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Clear this pipe from the global cdclk/dbuf/bandwidth state. */
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 *
 * Returns 0 on success or a negative error code from the atomic helper.
 * The suspended atomic state is stashed for restore on resume.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}
/*
 * Generic encoder destroy hook: tear down the DRM encoder and free the
 * containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);

	kfree(intel_encoder);
}
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are bound to a crtc, not to a connector. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}
/*
 * Report whether this crtc state is allowed to use IPS (Intermediate
 * Pixel Storage): pipe A on ULT only, module parameter enabled, bpp no
 * more than 24, and on BDW the pixel rate must leave cdclk headroom.
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!dev_priv->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
/*
 * Decide whether IPS should be enabled for this crtc state and record
 * the decision in crtc_state->ips_enabled. Returns 0 or a negative
 * error from fetching the cdclk state.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
2838 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2840 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2842 /* GDG double wide on either pipe, otherwise pipe A only */
2843 return DISPLAY_VER(dev_priv) < 4 &&
2844 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
/*
 * Pipe pixel rate for ILK+ style hardware: the raw pipe clock, adjusted
 * for downscaling when the PCH panel fitter is enabled.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */
	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	/* Source rect in 16.16 fixed point, matching the pfit dst rect. */
	drm_rect_init(&src, 0, 0,
		      crtc_state->pipe_src_w << 16,
		      crtc_state->pipe_src_h << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}
2868 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2869 const struct drm_display_mode *timings)
2871 mode->hdisplay = timings->crtc_hdisplay;
2872 mode->htotal = timings->crtc_htotal;
2873 mode->hsync_start = timings->crtc_hsync_start;
2874 mode->hsync_end = timings->crtc_hsync_end;
2876 mode->vdisplay = timings->crtc_vdisplay;
2877 mode->vtotal = timings->crtc_vtotal;
2878 mode->vsync_start = timings->crtc_vsync_start;
2879 mode->vsync_end = timings->crtc_vsync_end;
2881 mode->flags = timings->flags;
2882 mode->type = DRM_MODE_TYPE_DRIVER;
2884 mode->clock = timings->crtc_clock;
2886 drm_mode_set_name(mode);
2889 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2891 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2893 if (HAS_GMCH(dev_priv))
2894 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2895 crtc_state->pixel_rate =
2896 crtc_state->hw.pipe_mode.crtc_clock;
2898 crtc_state->pixel_rate =
2899 ilk_pipe_pixel_rate(crtc_state);
/*
 * Derive hw.mode and hw.pipe_mode from the read-out hw.adjusted_mode,
 * accounting for bigjoiner (pipe gets half the transcoder timings) and
 * eDP MSO segment splitting, then recompute the pixel rate.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		/* adjusted_mode keeps the un-split segment timings here */
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	drm_mode_copy(mode, adjusted_mode);
	/* user mode covers the full width across all joined pipes */
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
/*
 * Read out encoder-specific config into @crtc_state and then derive the
 * dependent mode/pixel-rate state from it.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
/*
 * Compute the pipe-level configuration: derive pipe_mode from the
 * adjusted mode (halved for bigjoiner, expanded for eDP MSO), validate
 * the pixel clock against platform limits (enabling double wide mode on
 * old hardware when needed), enforce even source width restrictions,
 * and compute the pixel rate / FDI config. Returns 0 or -EINVAL.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	/* eDP MSO: expand segment timings back to the full mode. */
	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
/*
 * Halve both terms of an M/N ratio until each fits in the hardware
 * DATA_LINK_M_N_MASK field width, preserving the ratio approximately.
 */
static void
intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}
/*
 * Compute an M/N pair approximating m/n, with N either the fixed
 * DP constant (when @constant_n) or the next power of two of @n, then
 * reduce both to fit the register field width.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = DP_LINK_CONSTANT_N_VALUE;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* M scaled to the chosen N; 64-bit math avoids overflow. */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
/*
 * Fill in the DP link M/N values (data and link ratios) for the given
 * bpp, lane count and clocks. FEC inflates the effective data clock.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	/* TU size — NOTE(review): restored from upstream, confirm. */
	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->data_m, &m_n->data_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed, preferring the BIOS value to avoid flicker.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		/* NOTE(review): register/bit restored from upstream — confirm. */
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
/*
 * Write one full set of link M/N registers. The data M register also
 * carries the TU size.
 */
static void intel_set_m_n(struct drm_i915_private *i915,
			  const struct intel_link_m_n *m_n,
			  i915_reg_t data_m_reg, i915_reg_t data_n_reg,
			  i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	intel_de_write(i915, link_n_reg, m_n->link_n);
}
/* Program the PCH transcoder's M1/N1 link values for this crtc's pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_set_m_n(dev_priv, m_n,
		      PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
		      PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
}
/* Does this transcoder have the second (DRRS) M2/N2 register set? */
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				 enum transcoder transcoder)
{
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	/*
	 * Strictly speaking some registers are available before
	 * gen7, but we only support DRRS on gen7+
	 */
	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
}
/*
 * Program the CPU transcoder link M/N registers: per-transcoder M1/N1
 * (and M2/N2 for DRRS where supported) on gen5+, or the per-pipe G4X
 * registers on older hardware.
 */
void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
				  const struct intel_link_m_n *m_n,
				  const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 5) {
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_set_m_n(dev_priv, m2_n2,
				      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
				      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
		}
	} else {
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
	}
}
/*
 * Program the transcoder H/V timing registers from the adjusted mode,
 * applying the interlace halfline and vsyncshift adjustments, plus the
 * HSW EDP VTOTAL mirroring workaround.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* Registers pack (value - 1): active in the low half, total/end high. */
	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
}
/* Program PIPESRC with the pipe source size (values are stored minus one). */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(crtc_state->pipe_src_w - 1) |
		       PIPESRC_HEIGHT(crtc_state->pipe_src_h - 1));
}
/*
 * HW readout: is the transcoder currently programmed for interlaced
 * output? Checks the PIPECONF interlace bits; HSW/BDW and display
 * ver 9+ use a different mask than older platforms. DISPLAY_VER == 2
 * is handled first (its return is elided in this excerpt; presumably
 * gen2 has no interlace support — confirm against full source).
 */
3264 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3266 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3267 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3269 if (DISPLAY_VER(dev_priv) == 2)
3272 if (DISPLAY_VER(dev_priv) >= 9 ||
3273 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3274 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3276 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
/*
 * HW readout of the transcoder h/v timings into
 * pipe_config->hw.adjusted_mode. All register fields are minus-one
 * encoded, hence the "+ 1" on every decode. The HBLANK/HSYNC and
 * VBLANK/VSYNC reads are skipped for DSI transcoders (those registers
 * presumably don't exist there — see the transcoder_is_dsi() guards).
 */
3279 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
3280 struct intel_crtc_state *pipe_config)
3282 struct drm_device *dev = crtc->base.dev;
3283 struct drm_i915_private *dev_priv = to_i915(dev);
3284 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
3287 tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
3288 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
3289 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
3291 if (!transcoder_is_dsi(cpu_transcoder)) {
3292 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
3293 pipe_config->hw.adjusted_mode.crtc_hblank_start =
3295 pipe_config->hw.adjusted_mode.crtc_hblank_end =
3296 ((tmp >> 16) & 0xffff) + 1;
3298 tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
3299 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
3300 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
3302 tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
3303 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
3304 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
3306 if (!transcoder_is_dsi(cpu_transcoder)) {
3307 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
3308 pipe_config->hw.adjusted_mode.crtc_vblank_start =
3310 pipe_config->hw.adjusted_mode.crtc_vblank_end =
3311 ((tmp >> 16) & 0xffff) + 1;
3313 tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
3314 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
3315 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
/*
 * Interlaced: re-add the halfline the programming side subtracted
 * (the hardware adds it automatically), and flag the mode.
 */
3317 if (intel_pipe_is_interlaced(pipe_config)) {
3318 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
3319 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
3320 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
/*
 * HW readout counterpart of intel_set_pipe_src_size(): decode the
 * minus-one encoded PIPESRC width/height fields into pipe_src_w/h.
 */
3324 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3325 struct intel_crtc_state *pipe_config)
3327 struct drm_device *dev = crtc->base.dev;
3328 struct drm_i915_private *dev_priv = to_i915(dev);
3331 tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3332 pipe_config->pipe_src_w = REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1;
3333 pipe_config->pipe_src_h = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1;
/*
 * Program PIPECONF for gmch platforms (gen2-4, VLV/CHV): pipe enable
 * carry-over for i830, double wide, dither/bpc (g4x+ only), interlace
 * mode, limited color range (VLV/CHV only), gamma mode and frame
 * start delay.
 */
3336 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
3338 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3339 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3344 /* we keep both pipes enabled on 830 */
3345 if (IS_I830(dev_priv))
3346 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
3348 if (crtc_state->double_wide)
3349 pipeconf |= PIPECONF_DOUBLE_WIDE;
3351 /* only g4x and later have fancy bpc/dither controls */
3352 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3353 IS_CHERRYVIEW(dev_priv)) {
3354 /* Bspec claims that we can't use dithering for 30bpp pipes. */
3355 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
3356 pipeconf |= PIPECONF_DITHER_EN |
3357 PIPECONF_DITHER_TYPE_SP;
/* NOTE: the case labels of this switch are elided in this excerpt. */
3359 switch (crtc_state->pipe_bpp) {
3361 pipeconf |= PIPECONF_BPC_6;
3364 pipeconf |= PIPECONF_BPC_8;
3367 pipeconf |= PIPECONF_BPC_10;
3370 /* Case prevented by intel_choose_pipe_bpp_dither. */
/*
 * Interlace: pre-gen4 and SDVO outputs need the field indication
 * variant, otherwise use sync-shift based interlacing.
 */
3375 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3376 if (DISPLAY_VER(dev_priv) < 4 ||
3377 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3378 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3380 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
3382 pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
3385 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3386 crtc_state->limited_color_range)
3387 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
3389 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
/* framestart_delay is 1-based; the register field is 0-based. */
3391 pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3393 intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
3394 intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
/*
 * Does this gmch platform have a panel fitter we can use?
 * i830 is special-cased first (its return is elided in this excerpt);
 * otherwise gen4+, Pineview and mobile parts have one.
 */
3397 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3399 if (IS_I830(dev_priv))
3402 return DISPLAY_VER(dev_priv) >= 4 ||
3403 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
/*
 * HW readout of the gmch panel fitter state into
 * crtc_state->gmch_pfit. Bails out (early returns elided in this
 * excerpt) when the platform has no pfit, the pfit is disabled, or
 * it is attached to a different pipe.
 */
3406 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3408 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3409 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3412 if (!i9xx_has_pfit(dev_priv))
3415 tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3416 if (!(tmp & PFIT_ENABLE))
3419 /* Check whether the pfit is attached to our pipe. */
3420 if (DISPLAY_VER(dev_priv) < 4) {
/* Pre-gen4: the pfit can only be attached to pipe B. */
3421 if (crtc->pipe != PIPE_B)
3424 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3428 crtc_state->gmch_pfit.control = tmp;
3429 crtc_state->gmch_pfit.pgm_ratios =
3430 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
/*
 * HW readout of the VLV DPLL dividers (via DPIO sideband) and
 * computation of port_clock from them. The reference clock is
 * 100 MHz. Skipped entirely for DSI, where the DPLL is not used
 * (VCO enable bit clear).
 */
3433 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
3434 struct intel_crtc_state *pipe_config)
3436 struct drm_device *dev = crtc->base.dev;
3437 struct drm_i915_private *dev_priv = to_i915(dev);
3438 enum pipe pipe = crtc->pipe;
3441 int refclk = 100000;
3443 /* In case of DSI, DPLL will not be used */
3444 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3447 vlv_dpio_get(dev_priv);
3448 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
3449 vlv_dpio_put(dev_priv);
/* Unpack m1/m2/n/p1/p2 divider fields from PLL_DW3. */
3451 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
3452 clock.m2 = mdiv & DPIO_M2DIV_MASK;
3453 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
3454 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
3455 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
3457 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
/*
 * HW readout of the CHV DPLL dividers (via DPIO sideband, per PHY
 * channel) and computation of port_clock. 100 MHz reference clock.
 * Skipped for DSI, where the DPLL is not used.
 */
3460 static void chv_crtc_clock_get(struct intel_crtc *crtc,
3461 struct intel_crtc_state *pipe_config)
3463 struct drm_device *dev = crtc->base.dev;
3464 struct drm_i915_private *dev_priv = to_i915(dev);
3465 enum pipe pipe = crtc->pipe;
3466 enum dpio_channel port = vlv_pipe_to_channel(pipe);
3468 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
3469 int refclk = 100000;
3471 /* In case of DSI, DPLL will not be used */
3472 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
3475 vlv_dpio_get(dev_priv);
3476 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
3477 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
3478 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
3479 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
3480 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
3481 vlv_dpio_put(dev_priv);
3483 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
/*
 * m2 is a fixed point value: integer part in the upper bits, with a
 * 22-bit fraction (from PLL_DW2) appended when the fractional
 * divider is enabled.
 */
3484 clock.m2 = (pll_dw0 & 0xff) << 22;
3485 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
3486 clock.m2 |= pll_dw2 & 0x3fffff;
3487 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
3488 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
3489 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
3491 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
/*
 * HW readout of the pipe output format (RGB / YCbCr 4:4:4 /
 * YCbCr 4:2:0) from PIPEMISC on BDW+. Warns if 4:2:0 is enabled
 * without full blend mode, which is the only mode supported.
 */
3494 static enum intel_output_format
3495 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3497 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3500 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3502 if (tmp & PIPEMISC_YUV420_ENABLE) {
3503 /* We support 4:2:0 in full blend mode only */
3504 drm_WARN_ON(&dev_priv->drm,
3505 (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3507 return INTEL_OUTPUT_FORMAT_YCBCR420;
3508 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3509 return INTEL_OUTPUT_FORMAT_YCBCR444;
3511 return INTEL_OUTPUT_FORMAT_RGB;
/*
 * HW readout of the pipe gamma/CSC enable bits. On these platforms
 * the bits live in the primary plane's DSPCNTR register, so the
 * plane register is read here. The CSC bit exists only on non-GMCH
 * hardware.
 */
3515 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3517 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3518 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3519 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3520 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3523 tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3525 if (tmp & DISP_PIPE_GAMMA_ENABLE)
3526 crtc_state->gamma_enable = true;
3528 if (!HAS_GMCH(dev_priv) &&
3529 tmp & DISP_PIPE_CSC_ENABLE)
3530 crtc_state->csc_enable = true;
/*
 * Full HW state readout for a gmch-platform pipe. Grabs a runtime PM
 * wakeref for the pipe's power domain (released before returning),
 * bails out if the power well or the pipe itself is disabled, and
 * otherwise fills pipe_config from PIPECONF, the timing registers,
 * the panel fitter and the DPLL state. Returns whether the pipe is
 * active (return statements are elided in this excerpt).
 */
3533 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3534 struct intel_crtc_state *pipe_config)
3536 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3537 enum intel_display_power_domain power_domain;
3538 intel_wakeref_t wakeref;
3542 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3543 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
/* On gmch platforms the cpu transcoder is hardwired 1:1 to the pipe. */
3547 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3548 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3549 pipe_config->shared_dpll = NULL;
3553 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
3554 if (!(tmp & PIPECONF_ENABLE))
/* Only g4x/VLV/CHV have bpc readout in PIPECONF (break statements elided). */
3557 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3558 IS_CHERRYVIEW(dev_priv)) {
3559 switch (tmp & PIPECONF_BPC_MASK) {
3560 case PIPECONF_BPC_6:
3561 pipe_config->pipe_bpp = 18;
3563 case PIPECONF_BPC_8:
3564 pipe_config->pipe_bpp = 24;
3566 case PIPECONF_BPC_10:
3567 pipe_config->pipe_bpp = 30;
3575 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3576 (tmp & PIPECONF_COLOR_RANGE_SELECT))
3577 pipe_config->limited_color_range = true;
3579 pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
3581 if (IS_CHERRYVIEW(dev_priv))
3582 pipe_config->cgm_mode = intel_de_read(dev_priv,
3583 CGM_PIPE_MODE(crtc->pipe));
3585 i9xx_get_pipe_color_config(pipe_config);
3586 intel_color_get_config(pipe_config);
3588 if (DISPLAY_VER(dev_priv) < 4)
3589 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
3591 intel_get_transcoder_timings(crtc, pipe_config);
3592 intel_get_pipe_src_size(crtc, pipe_config);
3594 i9xx_get_pfit_config(pipe_config);
/* Pixel multiplier readout: location differs per platform generation. */
3596 if (DISPLAY_VER(dev_priv) >= 4) {
3597 /* No way to read it out on pipes B and C */
3598 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
3599 tmp = dev_priv->chv_dpll_md[crtc->pipe];
3601 tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
3602 pipe_config->pixel_multiplier =
3603 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3604 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3605 pipe_config->dpll_hw_state.dpll_md = tmp;
3606 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3607 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3608 tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
3609 pipe_config->pixel_multiplier =
3610 ((tmp & SDVO_MULTIPLIER_MASK)
3611 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3613 /* Note that on i915G/GM the pixel multiplier is in the sdvo
3614 * port and will be fixed up in the encoder->get_config
3616 pipe_config->pixel_multiplier = 1;
3618 pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
3620 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
3621 pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
3623 pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
3626 /* Mask out read-only status bits. */
3627 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
3628 DPLL_PORTC_READY_MASK |
3629 DPLL_PORTB_READY_MASK);
3632 if (IS_CHERRYVIEW(dev_priv))
3633 chv_crtc_clock_get(crtc, pipe_config);
3634 else if (IS_VALLEYVIEW(dev_priv))
3635 vlv_crtc_clock_get(crtc, pipe_config);
3637 i9xx_crtc_clock_get(crtc, pipe_config);
3640 * Normally the dotclock is filled in by the encoder .get_config()
3641 * but in case the pipe is enabled w/o any ports we need a sane
3644 pipe_config->hw.adjusted_mode.crtc_clock =
3645 pipe_config->port_clock / pipe_config->pixel_multiplier;
3650 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Program PIPECONF for ILK-style (PCH) platforms: bpc, dithering,
 * interlace mode, limited color range, output colorspace, gamma mode
 * and frame start delay.
 */
3655 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3657 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3658 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3659 enum pipe pipe = crtc->pipe;
/* NOTE: the case labels of this switch are elided in this excerpt. */
3664 switch (crtc_state->pipe_bpp) {
3666 val |= PIPECONF_BPC_6;
3669 val |= PIPECONF_BPC_8;
3672 val |= PIPECONF_BPC_10;
3675 val |= PIPECONF_BPC_12;
3678 /* Case prevented by intel_choose_pipe_bpp_dither. */
3682 if (crtc_state->dither)
3683 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
3685 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3686 val |= PIPECONF_INTERLACE_IF_ID_ILK;
3688 val |= PIPECONF_INTERLACE_PF_PD_ILK;
/*
 * Limited color range on a non-RGB output format would be applied
 * twice (pipe + port), so it is only valid for RGB here.
 */
3691 * This would end up with an odd purple hue over
3692 * the entire display. Make sure we don't do it.
3694 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3695 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3697 if (crtc_state->limited_color_range &&
3698 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3699 val |= PIPECONF_COLOR_RANGE_SELECT;
3701 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3702 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
3704 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
/* framestart_delay is 1-based; the register field is 0-based. */
3706 val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3708 intel_de_write(dev_priv, PIPECONF(pipe), val);
3709 intel_de_posting_read(dev_priv, PIPECONF(pipe));
/*
 * Program the transcoder configuration (PIPECONF at the transcoder
 * offset) for HSW+: dithering and YUV colorspace are HSW-only here,
 * interlace mode applies to all.
 */
3712 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3714 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3715 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3716 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3719 if (IS_HASWELL(dev_priv) && crtc_state->dither)
3720 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
3722 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3723 val |= PIPECONF_INTERLACE_IF_ID_ILK;
3725 val |= PIPECONF_INTERLACE_PF_PD_ILK;
3727 if (IS_HASWELL(dev_priv) &&
3728 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3729 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3731 intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3732 intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
/*
 * Program PIPEMISC on BDW+: bpc, dithering, output colorspace
 * (YCbCr 4:4:4 / 4:2:0 in full blend mode), HDR precision (gen11+),
 * pixel rounding (gen12+) and, on ADL-P, the bubble counter
 * depending on scaler use.
 */
3735 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
3737 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3738 const struct intel_crtc_scaler_state *scaler_state =
3739 &crtc_state->scaler_state;
3741 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* NOTE: the case labels of this switch are elided in this excerpt. */
3745 switch (crtc_state->pipe_bpp) {
3747 val |= PIPEMISC_BPC_6;
3750 val |= PIPEMISC_BPC_8;
3753 val |= PIPEMISC_BPC_10;
3756 /* Port output 12BPC defined for ADLP+ */
3757 if (DISPLAY_VER(dev_priv) > 12)
3758 val |= PIPEMISC_BPC_12_ADLP;
3761 MISSING_CASE(crtc_state->pipe_bpp);
3765 if (crtc_state->dither)
3766 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
3768 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3769 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3770 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
3772 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3773 val |= PIPEMISC_YUV420_ENABLE |
3774 PIPEMISC_YUV420_MODE_FULL_BLEND;
3776 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3777 val |= PIPEMISC_HDR_MODE_PRECISION;
3779 if (DISPLAY_VER(dev_priv) >= 12)
3780 val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
3782 if (IS_ALDERLAKE_P(dev_priv)) {
3783 bool scaler_in_use = false;
/* Any scaler in use is enough; the loop break is elided in this excerpt. */
3785 for (i = 0; i < crtc->num_scalers; i++) {
3786 if (!scaler_state->scalers[i].in_use)
3789 scaler_in_use = true;
3793 intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
3794 PIPE_MISC2_BUBBLE_COUNTER_MASK,
3795 scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
3796 PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
3799 intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
/*
 * HW readout of the pipe bpp from the PIPEMISC bpc field on BDW+.
 * Returns the bpp value for each encoding (the return statements are
 * elided in this excerpt). PIPEMISC_BPC_12_ADLP is only a valid
 * encoding on ADL-P and later, hence the extra DISPLAY_VER check.
 */
3802 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
3804 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3807 tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3809 switch (tmp & PIPEMISC_BPC_MASK) {
3810 case PIPEMISC_BPC_6:
3812 case PIPEMISC_BPC_8:
3814 case PIPEMISC_BPC_10:
3817 * PORT OUTPUT 12 BPC defined for ADLP+.
3820 * For previous platforms with DSI interface, bits 5:7
3821 * are used for storing pipe_bpp irrespective of dithering.
3822 * Since the value of 12 BPC is not defined for these bits
3823 * on older platforms, need to find a workaround for 12 BPC
3824 * MIPI DSI HW readout.
3826 case PIPEMISC_BPC_12_ADLP:
3827 if (DISPLAY_VER(dev_priv) > 12)
/*
 * Compute the minimum number of FDI lanes needed to carry
 * target_clock (kHz) at the given bpp over a link of link_bw,
 * with 5% headroom for spread spectrum.
 */
3836 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3839 * Account for spread spectrum to avoid
3840 * oversubscribing the link. Max center spread
3841 * is 2.5%; use 5% for safety's sake.
3843 u32 bps = target_clock * bpp * 21 / 20;
3844 return DIV_ROUND_UP(bps, link_bw * 8);
/*
 * HW readout of one data/link M/N value set from the given register
 * pair. The TU size is stored minus-one in the data M register.
 */
3847 static void intel_get_m_n(struct drm_i915_private *i915,
3848 struct intel_link_m_n *m_n,
3849 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3850 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3852 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3853 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3854 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3855 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3856 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
/* HW readout of the PCH transcoder's M1/N1 values for this pipe. */
3859 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
3860 struct intel_link_m_n *m_n)
3862 struct drm_device *dev = crtc->base.dev;
3863 struct drm_i915_private *dev_priv = to_i915(dev);
3864 enum pipe pipe = crtc->pipe;
3866 intel_get_m_n(dev_priv, m_n,
3867 PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
3868 PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
/*
 * HW readout of the CPU transcoder M/N values. Gen5+ uses
 * per-transcoder registers (with an optional second M2/N2 set on
 * transcoders that have it); older g4x-style hardware uses per-pipe
 * registers. m2_n2 may be NULL if the caller doesn't need it.
 */
3871 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
3872 enum transcoder transcoder,
3873 struct intel_link_m_n *m_n,
3874 struct intel_link_m_n *m2_n2)
3876 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3877 enum pipe pipe = crtc->pipe;
3879 if (DISPLAY_VER(dev_priv) >= 5) {
3880 intel_get_m_n(dev_priv, m_n,
3881 PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3882 PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3884 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
3885 intel_get_m_n(dev_priv, m2_n2,
3886 PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3887 PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3890 intel_get_m_n(dev_priv, m_n,
3891 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3892 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
/*
 * HW readout of the DP M/N values: from the PCH transcoder when a PCH
 * encoder is used, otherwise from the CPU transcoder (including the
 * second dp_m2_n2 set where available).
 */
3896 void intel_dp_get_m_n(struct intel_crtc *crtc,
3897 struct intel_crtc_state *pipe_config)
3899 if (pipe_config->has_pch_encoder)
3900 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
3902 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3903 &pipe_config->dp_m_n,
3904 &pipe_config->dp_m2_n2);
/* HW readout of the FDI M/N values from the CPU transcoder. */
3907 void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
3908 struct intel_crtc_state *pipe_config)
3910 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3911 &pipe_config->fdi_m_n, NULL);
/*
 * Decode the packed panel fitter window position/size registers into
 * pch_pfit.dst: x/width in the high 16 bits, y/height in the low 16.
 */
3914 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3917 drm_rect_init(&crtc_state->pch_pfit.dst,
3918 pos >> 16, pos & 0xffff,
3919 size >> 16, size & 0xffff);
/*
 * HW readout of the SKL+ pipe scaler used as panel fitter: scan this
 * pipe's scalers for one that is enabled and bound to pipe output
 * (PS_SCALER_EN set, no plane selected), record its window and mark
 * it in use; update scaler_users accordingly.
 */
3922 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
3924 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3925 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3926 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
3930 /* find scaler attached to this pipe */
3931 for (i = 0; i < crtc->num_scalers; i++) {
3934 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
3935 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
3939 crtc_state->pch_pfit.enabled = true;
3941 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
3942 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
3944 ilk_get_pfit_pos_size(crtc_state, pos, size);
3946 scaler_state->scalers[i].in_use = true;
3950 scaler_state->scaler_id = id;
3952 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
3954 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
/*
 * HW readout of the ILK-style PCH panel fitter: bail if disabled
 * (early return elided in this excerpt), otherwise record the fitter
 * window and sanity-check the IVB/HSW pipe binding.
 */
3957 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3959 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3960 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3963 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3964 if ((ctl & PF_ENABLE) == 0)
3967 crtc_state->pch_pfit.enabled = true;
3969 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3970 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3972 ilk_get_pfit_pos_size(crtc_state, pos, size);
3975 * We currently do not free assignements of panel fitters on
3976 * ivb/hsw (since we don't use the higher upscaling modes which
3977 * differentiates them) so just WARN about this case for now.
3979 drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
3980 (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
/*
 * Full HW state readout for an ILK-style (PCH) pipe. Takes a runtime
 * PM wakeref for the pipe power domain (released before returning),
 * bails out if the power well or pipe is off, and otherwise fills
 * pipe_config from PIPECONF, the PCH state, the timing registers and
 * the panel fitter. Returns whether the pipe is active (return
 * statements are elided in this excerpt).
 */
3983 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
3984 struct intel_crtc_state *pipe_config)
3986 struct drm_device *dev = crtc->base.dev;
3987 struct drm_i915_private *dev_priv = to_i915(dev);
3988 enum intel_display_power_domain power_domain;
3989 intel_wakeref_t wakeref;
3993 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3994 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
/* On ILK the cpu transcoder is hardwired 1:1 to the pipe. */
3998 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3999 pipe_config->shared_dpll = NULL;
4002 tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4003 if (!(tmp & PIPECONF_ENABLE))
/* Decode bpc field (break statements elided in this excerpt). */
4006 switch (tmp & PIPECONF_BPC_MASK) {
4007 case PIPECONF_BPC_6:
4008 pipe_config->pipe_bpp = 18;
4010 case PIPECONF_BPC_8:
4011 pipe_config->pipe_bpp = 24;
4013 case PIPECONF_BPC_10:
4014 pipe_config->pipe_bpp = 30;
4016 case PIPECONF_BPC_12:
4017 pipe_config->pipe_bpp = 36;
4023 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
4024 pipe_config->limited_color_range = true;
4026 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
4027 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
4028 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
4029 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4032 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4036 pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
4038 pipe_config->csc_mode = intel_de_read(dev_priv,
4039 PIPE_CSC_MODE(crtc->pipe));
4041 i9xx_get_pipe_color_config(pipe_config);
4042 intel_color_get_config(pipe_config);
4044 pipe_config->pixel_multiplier = 1;
4046 ilk_pch_get_config(pipe_config);
4048 intel_get_transcoder_timings(crtc, pipe_config);
4049 intel_get_pipe_src_size(crtc, pipe_config);
4051 ilk_get_pfit_config(pipe_config);
4056 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Bitmask of pipes that can participate in bigjoiner on this
 * platform: A-D on gen12+, B-C on gen11; otherwise none (the final
 * return is elided in this excerpt).
 */
4061 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
4063 if (DISPLAY_VER(i915) >= 12)
4064 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
4065 else if (DISPLAY_VER(i915) >= 11)
4066 return BIT(PIPE_B) | BIT(PIPE_C);
/*
 * Is the DDI function of the given transcoder enabled? Reads
 * TRANS_DDI_FUNC_CTL only if the transcoder's power domain is
 * already enabled; otherwise tmp stays at its initial value
 * (initializer elided in this excerpt).
 */
4071 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
4072 enum transcoder cpu_transcoder)
4074 enum intel_display_power_domain power_domain;
4075 intel_wakeref_t wakeref;
4078 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4080 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4081 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4083 return tmp & TRANS_DDI_FUNC_ENABLE;
/*
 * HW readout of which pipes are currently acting as bigjoiner (or,
 * on gen13+, uncompressed joiner) masters and slaves, from
 * ICL_PIPE_DSS_CTL1. Warns if the master/slave pairing is not
 * consecutive. The function's return (presumably combining the two
 * masks) is elided in this excerpt.
 */
4086 static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
4088 u8 master_pipes = 0, slave_pipes = 0;
4089 struct intel_crtc *crtc;
4091 for_each_intel_crtc(&dev_priv->drm, crtc) {
4092 enum intel_display_power_domain power_domain;
4093 enum pipe pipe = crtc->pipe;
4094 intel_wakeref_t wakeref;
4096 if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
/* Compressed (DSC) bigjoiner state, guarded by the DSC power domain. */
4099 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
4100 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4101 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4103 if (!(tmp & BIG_JOINER_ENABLE))
4106 if (tmp & MASTER_BIG_JOINER_ENABLE)
4107 master_pipes |= BIT(pipe);
4109 slave_pipes |= BIT(pipe);
/* Uncompressed joiner bits only exist on gen13+. */
4112 if (DISPLAY_VER(dev_priv) < 13)
4115 power_domain = POWER_DOMAIN_PIPE(pipe);
4116 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
4117 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
4119 if (tmp & UNCOMPRESSED_JOINER_MASTER)
4120 master_pipes |= BIT(pipe);
4121 if (tmp & UNCOMPRESSED_JOINER_SLAVE)
4122 slave_pipes |= BIT(pipe);
4126 /* Bigjoiner pipes should always be consecutive master and slave */
4127 drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
4128 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
4129 master_pipes, slave_pipes);
/*
 * Bitmask of "panel" transcoders: always the eDP transcoder, plus
 * the two DSI transcoders on gen11+.
 */
4134 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
4136 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4138 if (DISPLAY_VER(i915) >= 11)
4139 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4141 return panel_transcoder_mask;
/*
 * HW readout: bitmask of transcoders currently driving this crtc's
 * pipe. Checks the panel (eDP/DSI) transcoders' EDP input selection,
 * the pipe's own transcoder, and — for a bigjoiner slave — the
 * master pipe's transcoder.
 */
4144 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
4146 struct drm_device *dev = crtc->base.dev;
4147 struct drm_i915_private *dev_priv = to_i915(dev);
4148 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
4149 enum transcoder cpu_transcoder;
4150 u8 enabled_transcoders = 0;
4153 * XXX: Do intel_display_power_get_if_enabled before reading this (for
4154 * consistency and less surprising code; it's in always on power).
4156 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
4157 panel_transcoder_mask) {
4158 enum intel_display_power_domain power_domain;
4159 intel_wakeref_t wakeref;
4160 enum pipe trans_pipe;
4163 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
4164 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
4165 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
4167 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
/*
 * Map the EDP input selection to a pipe. The default (unknown) case
 * label and the break statements are elided in this excerpt.
 */
4170 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
4173 "unknown pipe linked to transcoder %s\n",
4174 transcoder_name(cpu_transcoder));
4176 case TRANS_DDI_EDP_INPUT_A_ONOFF:
4177 case TRANS_DDI_EDP_INPUT_A_ON:
4178 trans_pipe = PIPE_A;
4180 case TRANS_DDI_EDP_INPUT_B_ONOFF:
4181 trans_pipe = PIPE_B;
4183 case TRANS_DDI_EDP_INPUT_C_ONOFF:
4184 trans_pipe = PIPE_C;
4186 case TRANS_DDI_EDP_INPUT_D_ONOFF:
4187 trans_pipe = PIPE_D;
4191 if (trans_pipe == crtc->pipe)
4192 enabled_transcoders |= BIT(cpu_transcoder);
4195 /* single pipe or bigjoiner master */
4196 cpu_transcoder = (enum transcoder) crtc->pipe;
4197 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4198 enabled_transcoders |= BIT(cpu_transcoder);
4200 /* bigjoiner slave -> consider the master pipe's transcoder as well */
4201 if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
4202 cpu_transcoder = (enum transcoder) crtc->pipe - 1;
4203 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
4204 enabled_transcoders |= BIT(cpu_transcoder);
4207 return enabled_transcoders;
/* Does the transcoder bitmask include the eDP transcoder? */
4210 static bool has_edp_transcoders(u8 enabled_transcoders)
4212 return enabled_transcoders & BIT(TRANSCODER_EDP);
/* Does the transcoder bitmask include either DSI transcoder? */
4215 static bool has_dsi_transcoders(u8 enabled_transcoders)
4217 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4218 BIT(TRANSCODER_DSI_1));
/* Does the bitmask include any ordinary (non-eDP, non-DSI) transcoder? */
4221 static bool has_pipe_transcoders(u8 enabled_transcoders)
4223 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4224 BIT(TRANSCODER_DSI_0) |
4225 BIT(TRANSCODER_DSI_1));
/*
 * Sanity-check the enabled-transcoders readout: at most one type of
 * transcoder (eDP / DSI / pipe) may be driving the crtc, and only
 * DSI transcoders may be ganged (more than one bit set).
 */
4228 static void assert_enabled_transcoders(struct drm_i915_private *i915,
4229 u8 enabled_transcoders)
4231 /* Only one type of transcoder please */
4232 drm_WARN_ON(&i915->drm,
4233 has_edp_transcoders(enabled_transcoders) +
4234 has_dsi_transcoders(enabled_transcoders) +
4235 has_pipe_transcoders(enabled_transcoders) > 1);
4237 /* Only DSI transcoders can be ganged */
4238 drm_WARN_ON(&i915->drm,
4239 !has_dsi_transcoders(enabled_transcoders) &&
4240 !is_power_of_2(enabled_transcoders));
/*
 * HW readout of the transcoder driving this crtc. Picks the (single,
 * DSI aside) enabled transcoder, acquires its power domain into
 * power_domain_set, notes the eDP force-thru workaround, and returns
 * whether the transcoder's PIPECONF is enabled. Early returns are
 * elided in this excerpt.
 */
4243 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
4244 struct intel_crtc_state *pipe_config,
4245 struct intel_display_power_domain_set *power_domain_set)
4247 struct drm_device *dev = crtc->base.dev;
4248 struct drm_i915_private *dev_priv = to_i915(dev);
4249 unsigned long enabled_transcoders;
4252 enabled_transcoders = hsw_enabled_transcoders(crtc);
4253 if (!enabled_transcoders)
4256 assert_enabled_transcoders(dev_priv, enabled_transcoders);
4259 * With the exception of DSI we should only ever have
4260 * a single enabled transcoder. With DSI let's just
4261 * pick the first one.
4263 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
4265 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4266 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
/* eDP transcoder routed through panel fitter: remember force-thru. */
4269 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
4270 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4272 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
4273 pipe_config->pch_pfit.force_thru = true;
4276 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
4278 return tmp & PIPECONF_ENABLE;
/*
 * HW readout of the BXT/GLK DSI transcoder state for this crtc:
 * checks ports A and C for an enabled DPI output routed to this
 * pipe, acquiring the transcoder power domain into power_domain_set.
 * Returns whether a DSI transcoder was found and selected.
 */
4281 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
4282 struct intel_crtc_state *pipe_config,
4283 struct intel_display_power_domain_set *power_domain_set)
4285 struct drm_device *dev = crtc->base.dev;
4286 struct drm_i915_private *dev_priv = to_i915(dev);
4287 enum transcoder cpu_transcoder;
4291 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
4293 cpu_transcoder = TRANSCODER_DSI_A;
4295 cpu_transcoder = TRANSCODER_DSI_C;
4297 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
4298 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
4302 * The PLL needs to be enabled with a valid divider
4303 * configuration, otherwise accessing DSI registers will hang
4304 * the machine. See BSpec North Display Engine
4305 * registers/MIPI[BXT]. We can break out here early, since we
4306 * need the same DSI PLL to be enabled for both DSI ports.
4308 if (!bxt_dsi_pll_is_enabled(dev_priv))
4311 /* XXX: this works for video mode only */
4312 tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
4313 if (!(tmp & DPI_ENABLE))
4316 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
4317 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
4320 pipe_config->cpu_transcoder = cpu_transcoder;
4324 return transcoder_is_dsi(pipe_config->cpu_transcoder);
/*
 * Full HW state readout for a HSW+ pipe. Collects the needed power
 * domains in power_domain_set (all released before returning),
 * determines the active transcoder (DDI or BXT/GLK DSI), then reads
 * back DSC, timings, VRR, source size, output format, color state,
 * linetimes, panel fitter, IPS and pixel multiplier. Returns whether
 * the pipe is active (some early-exit lines elided in this excerpt).
 */
4327 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
4328 struct intel_crtc_state *pipe_config)
4330 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4331 struct intel_display_power_domain_set power_domain_set = { };
4335 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4336 POWER_DOMAIN_PIPE(crtc->pipe)))
4339 pipe_config->shared_dpll = NULL;
4341 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
/* A DDI transcoder and a DSI transcoder must not both claim the pipe. */
4343 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4344 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
4345 drm_WARN_ON(&dev_priv->drm, active);
4352 intel_dsc_get_config(pipe_config);
4353 if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
4354 intel_uncompressed_joiner_get_config(pipe_config);
4356 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
4357 DISPLAY_VER(dev_priv) >= 11)
4358 intel_get_transcoder_timings(crtc, pipe_config);
4360 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
4361 intel_vrr_get_config(crtc, pipe_config);
4363 intel_get_pipe_src_size(crtc, pipe_config);
/* Output format: PIPECONF bit on HSW, PIPEMISC on BDW+. */
4365 if (IS_HASWELL(dev_priv)) {
4366 u32 tmp = intel_de_read(dev_priv,
4367 PIPECONF(pipe_config->cpu_transcoder));
4369 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
4370 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
4372 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4374 pipe_config->output_format =
4375 bdw_get_pipemisc_output_format(crtc);
4378 pipe_config->gamma_mode = intel_de_read(dev_priv,
4379 GAMMA_MODE(crtc->pipe));
4381 pipe_config->csc_mode = intel_de_read(dev_priv,
4382 PIPE_CSC_MODE(crtc->pipe));
/* Gamma/CSC enable bits: SKL_BOTTOM_COLOR on gen9+, DSPCNTR before. */
4384 if (DISPLAY_VER(dev_priv) >= 9) {
4385 tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
4387 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
4388 pipe_config->gamma_enable = true;
4390 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
4391 pipe_config->csc_enable = true;
4393 i9xx_get_pipe_color_config(pipe_config);
4396 intel_color_get_config(pipe_config);
4398 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
4399 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
4400 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4401 pipe_config->ips_linetime =
4402 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
4404 if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
4405 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
4406 if (DISPLAY_VER(dev_priv) >= 9)
4407 skl_get_pfit_config(pipe_config);
4409 ilk_get_pfit_config(pipe_config);
4412 if (hsw_crtc_supports_ips(crtc)) {
4413 if (IS_HASWELL(dev_priv))
4414 pipe_config->ips_enabled = intel_de_read(dev_priv,
4415 IPS_CTL) & IPS_ENABLE;
4418 * We cannot readout IPS state on broadwell, set to
4419 * true so we can set it to a defined state on first
4422 pipe_config->ips_enabled = true;
/* PIPE_MULT only exists for real pipe transcoders (not eDP/DSI). */
4426 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
4427 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
4428 pipe_config->pixel_multiplier =
4429 intel_de_read(dev_priv,
4430 PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
4432 pipe_config->pixel_multiplier = 1;
4436 intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
/*
 * Read out the hw state of the pipe backing @crtc_state via the platform's
 * get_pipe_config() vfunc and then derive the remaining software-only state.
 * Returns false when the pipe could not be read out (inactive/powered down).
 */
static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display->get_pipe_config(crtc, crtc_state))

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);
/*
 * VESA 640x480x72Hz mode to set on the pipe for load detection
 * (analog connector presence probing via a known-good mode).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
/*
 * Pull every plane on @crtc into @state and detach it (NULL crtc and fb)
 * so the subsequent commit disables all planes on that CRTC.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;

	ret = drm_atomic_add_affected_planes(state, crtc);

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		/* only planes currently assigned to this CRTC */
		if (plane_state->crtc != crtc)

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);

		drm_atomic_set_fb_for_plane(plane_state, NULL);
/*
 * Grab a pipe for load detection on @connector: reuse the connector's
 * current CRTC if any, otherwise pick the first unused CRTC the encoder
 * can drive, and commit the fixed load-detect mode on it. The pre-existing
 * state is duplicated into old->restore_state so that
 * intel_release_load_detect_pipe() can restore it afterwards.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);

		/* Make sure the crtc and connector are running */

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		/* skip CRTCs the encoder cannot drive */
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);

		/* skip CRTCs that are already in use */
		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);

		crtc = possible_crtc;

	 * If we didn't find an unused CRTC, don't use any.
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");

	/* one state for the modeset, one to restore afterwards */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,

	ret = intel_modeset_disable_planes(state, &crtc->base);

	/* snapshot the current state so it can be restored later */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",

	ret = drm_atomic_commit(state);
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_crtc_wait_for_next_vblank(crtc);

	drm_atomic_state_put(state);

	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;

	/* deadlock means the caller must back off and retry */
	if (ret == -EDEADLK)
/*
 * Undo intel_get_load_detect_pipe(): commit the duplicated pre-load-detect
 * state saved in old->restore_state, returning the pipe to its prior owner.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
		drm_dbg_kms(&i915->drm,
			    "Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
/*
 * Return the reference clock frequency (in kHz) feeding the DPLL,
 * derived from the programmed reference-input select bits and platform.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	/* SSC reference comes from the VBT-provided LVDS SSC frequency. */
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
	else if (DISPLAY_VER(dev_priv) != 2)
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 * Decodes the DPLL divider registers (FP0/FP1, P1/P2) back into m/n/p
 * values and computes port_clock from them.
 */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FP0 or FP1 is selected by the display rate select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	/* Pineview uses different N/M2 field encodings. */
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;

	if (DISPLAY_VER(dev_priv) != 2) {
		/* P1 field is a one-hot bitmask; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
		/* gen2 path below */
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			if (dpll & PLL_P2_DIVIDE_BY_4)

		port_clock = i9xx_calc_dpll_params(refclk, &clock);

	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	pipe_config->port_clock = port_clock;
/*
 * Compute the pixel (dot) clock in kHz from the link frequency and the
 * programmed link M/N values.
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n

	/* 64-bit intermediate avoids overflow of link_m * link_freq */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
/*
 * Returns the currently programmed mode of the given encoder, read back
 * from the hardware. The returned mode is kzalloc'd; the caller owns it
 * and must kfree() it.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;

	/* encoder must be enabled to have a current mode */
	if (!encoder->get_hw_state(encoder, &pipe))

	crtc = intel_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);

	crtc_state = intel_crtc_state_alloc(crtc);

	if (!intel_crtc_get_pipe_config(crtc_state)) {

	intel_encoder_get_config(encoder, crtc_state);

	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
 * intel_wm_need_update - Check whether watermarks need updating
 * @cur: current plane state
 * @new: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
static bool intel_wm_need_update(const struct intel_plane_state *cur,
				 struct intel_plane_state *new)
	/* Update watermarks on tiling or size changes. */
	if (new->uapi.visible != cur->uapi.visible)

	/* either plane having no fb forces an update */
	if (!cur->hw.fb || !new->hw.fb)

	/* any change in modifier, rotation, or src/dst geometry */
	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
	    cur->hw.rotation != new->hw.rotation ||
	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
4867 static bool needs_scaling(const struct intel_plane_state *state)
4869 int src_w = drm_rect_width(&state->uapi.src) >> 16;
4870 int src_h = drm_rect_height(&state->uapi.src) >> 16;
4871 int dst_w = drm_rect_width(&state->uapi.dst);
4872 int dst_h = drm_rect_height(&state->uapi.dst);
4874 return (src_w != dst_w || src_h != dst_h);
/*
 * Decide whether this plane update should be performed as an async flip.
 * Requires the plane to support async flips and the new crtc state to
 * request one; see the DISPLAY13 note below for the extra condition.
 */
static bool intel_plane_do_async_flip(struct intel_plane *plane,
				      const struct intel_crtc_state *old_crtc_state,
				      const struct intel_crtc_state *new_crtc_state)
	struct drm_i915_private *i915 = to_i915(plane->base.dev);

	if (!plane->async_flip)

	if (!new_crtc_state->uapi.async_flip)

	 * In platforms after DISPLAY13, we might need to override
	 * first async flip in order to change watermark levels
	 * as part of optimization.
	 * So for those, we are checking if this is a first async flip.
	 * For platforms earlier than DISPLAY13 we always do async flip.
	return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
/*
 * Work out what derived crtc-state flags (watermark updates, cxsr/lp_wm
 * disables, fb_bits, async flip) a plane state change implies. Called
 * during atomic check; returns 0 or a negative error code.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *new_plane_state)
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = new_crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;

	/* SKL+ non-cursor planes may need a scaler assigned */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);

	was_visible = old_plane_state->uapi.visible;
	visible = new_plane_state->uapi.visible;

	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(new_crtc_state, new_plane_state);

	if (!was_visible && !visible)

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	/* pre-gen5 (minus g4x) needs manual wm updates around enables */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		new_crtc_state->update_wm_pre = true;

	/* must disable cxsr around plane enable/disable */
	if (plane->id != PLANE_CURSOR)
		new_crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			new_crtc_state->update_wm_pre = true;
			new_crtc_state->update_wm_post = true;

	if (visible || was_visible)
		new_crtc_state->fb_bits |= plane->frontbuffer_bit;

	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(new_plane_state))))
		new_crtc_state->disable_lp_wm = true;

	if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
		new_plane_state->do_async_flip = true;
5024 static bool encoders_cloneable(const struct intel_encoder *a,
5025 const struct intel_encoder *b)
5027 /* masks could be asymmetric, so check both ways */
5028 return a == b || (a->cloneable & (1 << b->type) &&
5029 b->cloneable & (1 << a->type));
/*
 * Check that @encoder can be cloned with every other encoder that the
 * atomic state assigns to @crtc. Returns false on the first conflict.
 */
static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		/* only connectors routed to this CRTC matter */
		if (connector_state->crtc != &crtc->base)

			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
/*
 * For every plane in the state that has a planar (NV12 Y/UV) link,
 * pull its linked plane into the atomic state too, so both get
 * reprogrammed together. Returns 0 or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* links must be mutual and with opposite master/slave roles */
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
/*
 * ICL+ planar (NV12) support: each UV plane needs a linked Y plane.
 * Tear down all stale links, then pair every NV12 plane in the state
 * with a free Y-capable plane, copying the relevant hw parameters to
 * the slave. Returns 0 or a negative error code.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;

	if (DISPLAY_VER(dev_priv) < 11)

	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);

		plane_state->planar_slave = false;

	/* no NV12 planes -> nothing to pair up */
	if (!crtc_state->nv12_planes)

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))

		/* find a free Y-capable plane on this CRTC */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))

			if (crtc_state->active_planes & BIT(linked->id))

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes select their Y plane via PLANE_CUS_CTL */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
				MISSING_CASE(linked->id);
5178 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
5180 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5181 struct intel_atomic_state *state =
5182 to_intel_atomic_state(new_crtc_state->uapi.state);
5183 const struct intel_crtc_state *old_crtc_state =
5184 intel_atomic_get_old_crtc_state(state, crtc);
5186 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
/*
 * Compute the HSW linetime watermark (time to scan one line, in
 * 1/8 us units) from htotal and the pixel clock; clamped to the
 * 9-bit register field.
 */
static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;

	if (!crtc_state->hw.enable)

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					pipe_mode->crtc_clock);

	return min(linetime_wm, 0x1ff);
/*
 * Same linetime computation as hsw_linetime_wm(), but relative to the
 * logical CDCLK instead of the pixel clock — used for the IPS linetime.
 */
static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_cdclk_state *cdclk_state)
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;

	if (!crtc_state->hw.enable)

	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
					cdclk_state->logical.cdclk);

	return min(linetime_wm, 0x1ff);
/*
 * SKL+ linetime watermark: computed against the crtc pixel rate
 * (rounding up), with a BXT/GLK IPC workaround applied, clamped to
 * the 9-bit register field.
 */
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;

	if (!crtc_state->hw.enable)

	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
				   crtc_state->pixel_rate);

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    dev_priv->ipc_enabled)

	return min(linetime_wm, 0x1ff);
/*
 * Fill in crtc_state->linetime (SKL+ vs HSW variants) and, for IPS-capable
 * CRTCs, crtc_state->ips_linetime (needs the computed cdclk state).
 * Returns 0 or a negative error code.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (DISPLAY_VER(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	/* ips_linetime only applies to IPS-capable CRTCs */
	if (!hsw_crtc_supports_ips(crtc))

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
/*
 * Per-CRTC atomic check: computes clocks, color management, watermarks,
 * scalers, IPS, linetime and PSR2 selective-fetch state for the new
 * crtc state. Returns 0 or the first error encountered.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);

	/* pre-gen5 (minus g4x) wants a post-disable wm update */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (mode_changed && crtc_state->hw.enable &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);

	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);

	ret = intel_compute_pipe_wm(state, crtc);
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");

	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state.  We can program these
	ret = intel_compute_intermediate_wm(state, crtc);
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");

	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);

	ret = intel_psr2_sel_fetch_update(state, crtc);
/*
 * Sync each connector's atomic state (crtc, best_encoder, max_bpc and the
 * connector reference count) with the current hardware/legacy state.
 * Used after hw state readout.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/* drop the ref held for the previous crtc assignment */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* pipe_bpp is per-pipe; per-component bpc is /3 */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			drm_connector_get(&connector->base);
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;

	drm_connector_list_iter_end(&conn_iter);
/*
 * Clamp pipe_config->pipe_bpp to what this sink (connector) can accept,
 * based on conn_state->max_bpc. Returns 0 or a negative error code.
 */
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *pipe_config)
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;

	/* map max_bpc to a pipe bpp value */
	switch (conn_state->max_bpc) {
		MISSING_CASE(conn_state->max_bpc);

	/* only ever lower the pipe bpp, never raise it */
	if (bpp < pipe_config->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
			    connector->base.id, connector->name,
			    3 * conn_state->max_requested_bpc,
			    pipe_config->pipe_bpp);

		pipe_config->pipe_bpp = bpp;
/*
 * Pick the platform's maximum pipe bpp as a starting point, then clamp
 * it per connector via compute_sink_pipe_bpp(). Returns 0 or an error.
 */
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;

	/* platform-dependent maximum pipe bpp */
	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
	else if (DISPLAY_VER(dev_priv) >= 5)

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(state, connector, connector_state, i) {

		if (connector_state->crtc != &crtc->base)

		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
/* Debug-log the crtc_* timing fields of @mode. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
/* Debug-log one set of link M/N values, labelled with @id. */
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    m_n->data_m, m_n->data_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
/* Debug-log an HDMI infoframe; no-op unless KMS debugging is enabled. */
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
	if (!drm_debug_enabled(DRM_UT_KMS))

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
/* Debug-log a DP VSC SDP; no-op unless KMS debugging is enabled. */
intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
		      const struct drm_dp_vsc_sdp *vsc)
	if (!drm_debug_enabled(DRM_UT_KMS))

	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
/* Map INTEL_OUTPUT_* enum values to their names for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DP_MST),
/*
 * Format the set bits of @output_types as a comma-separated list of
 * output type names into @buf (at most @len bytes). Warns once if any
 * unknown bits remain afterwards.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		if ((output_types & BIT(i)) == 0)

		/* prepend a comma for every entry after the first */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);

		output_types &= ~BIT(i);

	WARN_ON_ONCE(output_types != 0);
/* Human-readable names for enum intel_output_format, for debug output. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
/* Return the name of @format, guarding against out-of-range values. */
static const char *output_formats(enum intel_output_format format)
	if (format >= ARRAY_SIZE(output_format_str))
	return output_format_str[format];
/*
 * Debug-log one plane state: fb (if any), format/modifier, visibility,
 * rotation, scaler assignment, and src/dst rectangles when visible.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/* plane without a framebuffer gets the short form */
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
/*
 * Dump an entire crtc state to the KMS debug log: output types/format,
 * transcoders, bigjoiner/splitter setup, link M/N values, infoframes,
 * VRR parameters, modes/timings, pfit/scaler state, DPLL hw state, color
 * management bits and finally each plane on this pipe.
 * @context is a caller-supplied tag appended to the first log line.
 */
5598 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
5599 struct intel_atomic_state *state,
5600 const char *context)
5602 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5603 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5604 const struct intel_plane_state *plane_state;
5605 struct intel_plane *plane;
5609 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
5610 crtc->base.base.id, crtc->base.name,
5611 yesno(pipe_config->hw.enable), context);
/* nothing more to dump for a disabled crtc (early-out body not visible) */
5613 if (!pipe_config->hw.enable)
5616 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
5617 drm_dbg_kms(&dev_priv->drm,
5618 "active: %s, output_types: %s (0x%x), output format: %s\n",
5619 yesno(pipe_config->hw.active),
5620 buf, pipe_config->output_types,
5621 output_formats(pipe_config->output_format));
5623 drm_dbg_kms(&dev_priv->drm,
5624 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
5625 transcoder_name(pipe_config->cpu_transcoder),
5626 pipe_config->pipe_bpp, pipe_config->dither);
5628 drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
5629 transcoder_name(pipe_config->mst_master_transcoder))_
5631 drm_dbg_kms(&dev_priv->drm,
5632 "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
5633 transcoder_name(pipe_config->master_transcoder),
5634 pipe_config->sync_mode_slaves_mask);
/* a crtc is either the bigjoiner master, a slave, or not joined at all */
5636 drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
5637 pipe_config->bigjoiner_slave ? "slave" :
5638 pipe_config->bigjoiner ? "master" : "no");
5640 drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
5641 enableddisabled(pipe_config->splitter.enable),
5642 pipe_config->splitter.link_count,
5643 pipe_config->splitter.pixel_overlap);
5645 if (pipe_config->has_pch_encoder)
5646 intel_dump_m_n_config(pipe_config, "fdi",
5647 pipe_config->fdi_lanes,
5648 &pipe_config->fdi_m_n);
5650 if (intel_crtc_has_dp_encoder(pipe_config)) {
5651 intel_dump_m_n_config(pipe_config, "dp m_n",
5652 pipe_config->lane_count, &pipe_config->dp_m_n);
/* m2_n2 only matters when DRRS (dual refresh rate) is in use */
5653 if (pipe_config->has_drrs)
5654 intel_dump_m_n_config(pipe_config, "dp m2_n2",
5655 pipe_config->lane_count,
5656 &pipe_config->dp_m2_n2);
5659 drm_dbg_kms(&dev_priv->drm,
5660 "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
5661 pipe_config->has_audio, pipe_config->has_infoframe,
5662 pipe_config->infoframes.enable);
/* dump each infoframe/SDP only when its enable bit is set */
5664 if (pipe_config->infoframes.enable &
5665 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
5666 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
5667 pipe_config->infoframes.gcp);
5668 if (pipe_config->infoframes.enable &
5669 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
5670 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
5671 if (pipe_config->infoframes.enable &
5672 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
5673 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
5674 if (pipe_config->infoframes.enable &
5675 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
5676 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
5677 if (pipe_config->infoframes.enable &
5678 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
5679 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
/* the gamut metadata packet shares infoframes.drm storage with the DRM
 * (HDR metadata) infoframe, hence the same member is dumped twice */
5680 if (pipe_config->infoframes.enable &
5681 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
5682 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
5683 if (pipe_config->infoframes.enable &
5684 intel_hdmi_infoframe_enable(DP_SDP_VSC))
5685 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
5687 drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
5688 yesno(pipe_config->vrr.enable),
5689 pipe_config->vrr.vmin, pipe_config->vrr.vmax,
5690 pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
5691 pipe_config->vrr.flipline,
5692 intel_vrr_vmin_vblank_start(pipe_config),
5693 intel_vrr_vmax_vblank_start(pipe_config));
5695 drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
5696 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
5697 drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
5698 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
5699 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
5700 drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
5701 drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
5702 intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
5703 drm_dbg_kms(&dev_priv->drm,
5704 "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
5705 pipe_config->port_clock,
5706 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
5707 pipe_config->pixel_rate);
5709 drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
5710 pipe_config->linetime, pipe_config->ips_linetime);
/* pipe scalers only exist on display version 9 (SKL) and newer */
5712 if (DISPLAY_VER(dev_priv) >= 9)
5713 drm_dbg_kms(&dev_priv->drm,
5714 "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
5716 pipe_config->scaler_state.scaler_users,
5717 pipe_config->scaler_state.scaler_id);
/* GMCH platforms use the older panel fitter register layout */
5719 if (HAS_GMCH(dev_priv))
5720 drm_dbg_kms(&dev_priv->drm,
5721 "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
5722 pipe_config->gmch_pfit.control,
5723 pipe_config->gmch_pfit.pgm_ratios,
5724 pipe_config->gmch_pfit.lvds_border_bits);
5726 drm_dbg_kms(&dev_priv->drm,
5727 "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
5728 DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
5729 enableddisabled(pipe_config->pch_pfit.enabled),
5730 yesno(pipe_config->pch_pfit.force_thru));
5732 drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
5733 pipe_config->ips_enabled, pipe_config->double_wide);
5735 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
/* CHV has the extra CGM (color gamut mapping) block; others use csc_mode */
5737 if (IS_CHERRYVIEW(dev_priv))
5738 drm_dbg_kms(&dev_priv->drm,
5739 "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5740 pipe_config->cgm_mode, pipe_config->gamma_mode,
5741 pipe_config->gamma_enable, pipe_config->csc_enable);
5743 drm_dbg_kms(&dev_priv->drm,
5744 "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
5745 pipe_config->csc_mode, pipe_config->gamma_mode,
5746 pipe_config->gamma_enable, pipe_config->csc_enable);
5748 drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
5749 pipe_config->hw.degamma_lut ?
5750 drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
5751 pipe_config->hw.gamma_lut ?
5752 drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
/* finally dump the state of every plane attached to this pipe */
5758 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5759 if (plane->pipe == crtc->pipe)
5760 intel_dump_plane_state(plane_state);
/*
 * Verify that no digital port is claimed more than once by the encoders
 * in @state, and that SST/HDMI and MST are not mixed on the same port.
 * Returns true when the configuration is conflict-free.
 * NOTE(review): the lines that set the failure result and record
 * used_mst_ports are not visible in this excerpt.
 */
5764 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
5766 struct drm_device *dev = state->base.dev;
5767 struct drm_connector *connector;
5768 struct drm_connector_list_iter conn_iter;
5769 unsigned int used_ports = 0;
5770 unsigned int used_mst_ports = 0;
5774 * We're going to peek into connector->state,
5775 * hence connection_mutex must be held.
5777 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5780 * Walk the connector list instead of the encoder
5781 * list to detect the problem on ddi platforms
5782 * where there's just one encoder per digital port.
5784 drm_connector_list_iter_begin(dev, &conn_iter);
5785 drm_for_each_connector_iter(connector, &conn_iter) {
5786 struct drm_connector_state *connector_state;
5787 struct intel_encoder *encoder;
/* fall back to the current state when @state carries no new state
 * for this connector */
5790 drm_atomic_get_new_connector_state(&state->base,
5792 if (!connector_state)
5793 connector_state = connector->state;
5795 if (!connector_state->best_encoder)
5798 encoder = to_intel_encoder(connector_state->best_encoder);
5800 drm_WARN_ON(dev, !connector_state->crtc);
5802 switch (encoder->type) {
/* DDI encoders should only appear on DDI-capable hardware */
5803 case INTEL_OUTPUT_DDI:
5804 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5807 case INTEL_OUTPUT_DP:
5808 case INTEL_OUTPUT_HDMI:
5809 case INTEL_OUTPUT_EDP:
5810 /* the same port mustn't appear more than once */
5811 if (used_ports & BIT(encoder->port))
5814 used_ports |= BIT(encoder->port);
5816 case INTEL_OUTPUT_DP_MST:
5824 drm_connector_list_iter_end(&conn_iter);
5826 /* can't mix MST and SST/HDMI on the same port */
5827 if (used_ports & used_mst_ports)
/*
 * Sync the parts of hw state that may change without a full modeset
 * (the color management blobs), pulling them from the bigjoiner master
 * crtc's state when that state is part of this commit.
 * NOTE(review): the return type / leading line of the definition is not
 * visible in this excerpt.
 */
5834 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
5835 struct intel_crtc_state *crtc_state)
5837 const struct intel_crtc_state *master_crtc_state;
5838 struct intel_crtc *master_crtc;
5840 master_crtc = intel_master_crtc(crtc_state);
5841 master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
5843 /* No need to copy state if the master state is unchanged */
5844 if (master_crtc_state)
5845 intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
/*
 * Copy the uapi-visible crtc state (enable/active, modes, scaling filter)
 * into the hw state, then sync the nomodeset parts (color blobs) too.
 * NOTE(review): the return type / leading line of the definition is not
 * visible in this excerpt.
 */
5849 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
5850 struct intel_crtc_state *crtc_state)
5852 crtc_state->hw.enable = crtc_state->uapi.enable;
5853 crtc_state->hw.active = crtc_state->uapi.active;
5854 crtc_state->hw.mode = crtc_state->uapi.mode;
5855 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
5856 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5858 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
/*
 * Reflect the hw crtc state back into the uapi state so userspace reads
 * the values actually programmed. Bigjoiner slaves are skipped: their
 * uapi state stays "disabled" while the master owns the real state.
 */
5861 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
5863 if (crtc_state->bigjoiner_slave)
5866 crtc_state->uapi.enable = crtc_state->hw.enable;
5867 crtc_state->uapi.active = crtc_state->hw.active;
/* setting the mode also updates the mode blob; failure is unexpected */
5868 drm_WARN_ON(crtc_state->uapi.crtc->dev,
5869 drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
5871 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
5872 crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
5874 /* copy color blobs to uapi */
5875 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
5876 crtc_state->hw.degamma_lut);
5877 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
5878 crtc_state->hw.gamma_lut);
5879 drm_property_replace_blob(&crtc_state->uapi.ctm,
5880 crtc_state->hw.ctm);
/*
 * Turn @crtc_state into a bigjoiner slave state cloned from the master's
 * @from_crtc_state, while preserving this crtc's own uapi state, scaler
 * state, DPLL selection/state and CRC setting.
 * NOTE(review): the return type and the kmemdup() NULL-check are not
 * visible in this excerpt — presumably it returns an errno on allocation
 * failure; confirm against the full source.
 */
5884 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
5885 const struct intel_crtc_state *from_crtc_state)
5887 struct intel_crtc_state *saved_state;
5889 saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
/* preserve this crtc's own identity/resources over the master's copy */
5893 saved_state->uapi = crtc_state->uapi;
5894 saved_state->scaler_state = crtc_state->scaler_state;
5895 saved_state->shared_dpll = crtc_state->shared_dpll;
5896 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5897 saved_state->crc_enabled = crtc_state->crc_enabled;
5899 intel_crtc_free_hw_state(crtc_state);
5900 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5903 /* Re-init hw state */
5904 memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
5905 crtc_state->hw.enable = from_crtc_state->hw.enable;
5906 crtc_state->hw.active = from_crtc_state->hw.active;
5907 crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
5908 crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
/* mirror the master's change flags, then mark ourselves as the slave */
5911 crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
5912 crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
5913 crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
5914 crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
5915 crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
5916 crtc_state->bigjoiner_slave = true;
5917 crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
5918 crtc_state->has_audio = from_crtc_state->has_audio;
/*
 * Reset @crtc_state to a freshly-allocated (mostly zeroed) state while
 * carrying over the fields that must survive recomputation: uapi state,
 * scaler state, DPLL selections, CRC setting and (on pre-ilk watermark
 * platforms) the watermark state. Finishes by re-syncing uapi -> hw.
 * NOTE(review): the return type and the allocation-failure check after
 * intel_crtc_state_alloc() are not visible in this excerpt.
 */
5924 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
5925 struct intel_crtc_state *crtc_state)
5927 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5928 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5929 struct intel_crtc_state *saved_state;
5931 saved_state = intel_crtc_state_alloc(crtc);
5935 /* free the old crtc_state->hw members */
5936 intel_crtc_free_hw_state(crtc_state);
5938 /* FIXME: before the switch to atomic started, a new pipe_config was
5939 * kzalloc'd. Code that depends on any field being zero should be
5940 * fixed, so that the crtc_state can be safely duplicated. For now,
5941 * only fields that are know to not cause problems are preserved. */
5943 saved_state->uapi = crtc_state->uapi;
5944 saved_state->scaler_state = crtc_state->scaler_state;
5945 saved_state->shared_dpll = crtc_state->shared_dpll;
5946 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
5947 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
5948 sizeof(saved_state->icl_port_dplls));
5949 saved_state->crc_enabled = crtc_state->crc_enabled;
/* these platforms track watermarks in the crtc state; keep them */
5950 if (IS_G4X(dev_priv) ||
5951 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5952 saved_state->wm = crtc_state->wm;
5954 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
5957 intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
/*
 * Compute the full pipe configuration for a modeset: pick the transcoder,
 * sanitize sync flags, compute the baseline bpp, derive the pipe source
 * size, collect output types, then let each encoder and the crtc adjust
 * the config — retrying when the crtc reports bandwidth constraints.
 * NOTE(review): this excerpt is missing the leading return-type line, the
 * retry label/declaration and the error/return paths; the retry-loop
 * structure is inferred from the visible -EAGAIN handling — confirm
 * against the full source.
 */
5963 intel_modeset_pipe_config(struct intel_atomic_state *state,
5964 struct intel_crtc_state *pipe_config)
5966 struct drm_crtc *crtc = pipe_config->uapi.crtc;
5967 struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5968 struct drm_connector *connector;
5969 struct drm_connector_state *connector_state;
5970 int base_bpp, ret, i;
/* default to the transcoder with the same index as the pipe */
5973 pipe_config->cpu_transcoder =
5974 (enum transcoder) to_intel_crtc(crtc)->pipe;
5977 * Sanitize sync polarity flags based on requested ones. If neither
5978 * positive or negative polarity is requested, treat this as meaning
5979 * negative polarity.
5981 if (!(pipe_config->hw.adjusted_mode.flags &
5982 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5983 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5985 if (!(pipe_config->hw.adjusted_mode.flags &
5986 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5987 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5989 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
/* remember the baseline so the final bpp can be logged against it */
5994 base_bpp = pipe_config->pipe_bpp;
5997 * Determine the real pipe dimensions. Note that stereo modes can
5998 * increase the actual pipe size due to the frame doubling and
5999 * insertion of additional space for blanks between the frame. This
6000 * is stored in the crtc timings. We use the requested mode to do this
6001 * computation to clearly distinguish it from the adjusted mode, which
6002 * can be changed by the connectors in the below retry loop.
6004 drm_mode_get_hv_timing(&pipe_config->hw.mode,
6005 &pipe_config->pipe_src_w,
6006 &pipe_config->pipe_src_h);
6008 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6009 struct intel_encoder *encoder =
6010 to_intel_encoder(connector_state->best_encoder);
/* only consider connectors routed to this crtc */
6012 if (connector_state->crtc != crtc)
6015 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
6016 drm_dbg_kms(&i915->drm,
6017 "rejecting invalid cloning configuration\n");
6022 * Determine output_types before calling the .compute_config()
6023 * hooks so that the hooks can use this information safely.
6025 if (encoder->compute_output_type)
6026 pipe_config->output_types |=
6027 BIT(encoder->compute_output_type(encoder, pipe_config,
6030 pipe_config->output_types |= BIT(encoder->type);
6034 /* Ensure the port clock defaults are reset when retrying. */
6035 pipe_config->port_clock = 0;
6036 pipe_config->pixel_multiplier = 1;
6038 /* Fill in default crtc timings, allow encoders to overwrite them. */
6039 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
6040 CRTC_STEREO_DOUBLE);
6042 /* Pass our mode to the connectors and the CRTC to give them a chance to
6043 * adjust it according to limitations or connector properties, and also
6044 * a chance to reject the mode entirely.
6046 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6047 struct intel_encoder *encoder =
6048 to_intel_encoder(connector_state->best_encoder);
6050 if (connector_state->crtc != crtc)
6053 ret = encoder->compute_config(encoder, pipe_config,
/* -EDEADLK must propagate for the atomic backoff/retry machinery */
6055 if (ret == -EDEADLK)
6058 drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
6063 /* Set default port clock if not overwritten by the encoder. Needs to be
6064 * done afterwards in case the encoder adjusts the mode. */
6065 if (!pipe_config->port_clock)
6066 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
6067 * pipe_config->pixel_multiplier;
6069 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
6070 if (ret == -EDEADLK)
/* -EAGAIN means bandwidth constrained: retry once, warn on a loop */
6072 if (ret == -EAGAIN) {
6073 if (drm_WARN(&i915->drm, !retry,
6074 "loop in pipe configuration computation\n"))
6077 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
6082 drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
6086 /* Dithering seems to not pass-through bits correctly when it should, so
6087 * only enable it on 6bpc panels and when its not a compliance
6088 * test requesting 6bpc video pattern.
6090 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
6091 !pipe_config->dither_force_disable;
6092 drm_dbg_kms(&i915->drm,
6093 "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
6094 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/*
 * Give each encoder on this crtc a late chance to adjust the state via
 * its optional .compute_config_late() hook, after the main config pass.
 * NOTE(review): the leading return-type line and the error-return path
 * for a failing hook are not visible in this excerpt.
 */
6100 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
6102 struct intel_atomic_state *state =
6103 to_intel_atomic_state(crtc_state->uapi.state);
6104 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6105 struct drm_connector_state *conn_state;
6106 struct drm_connector *connector;
6109 for_each_new_connector_in_state(&state->base, connector,
6111 struct intel_encoder *encoder =
6112 to_intel_encoder(conn_state->best_encoder);
/* skip connectors on other crtcs and encoders without a late hook */
6115 if (conn_state->crtc != &crtc->base ||
6116 !encoder->compute_config_late)
6119 ret = encoder->compute_config_late(encoder, crtc_state,
/*
 * Fuzzy clock comparison: two clocks match when they are identical, or
 * when they differ by less than ~5% of their combined value. A zero
 * clock never matches a non-zero one.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	delta = abs(clock1 - clock2);

	/*
	 * Equivalent to delta < 5% of (clock1 + clock2): the ratio
	 * (delta + sum) / sum stays below 1.05 in integer percent math.
	 */
	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
/*
 * Compare two M/N link ratios. Exact equality always matches; in fuzzy
 * mode (!exact, all values non-zero) the pair whose N is smaller is
 * scaled up until the Ns align, then the Ms are compared fuzzily.
 * NOTE(review): the leading "static bool" line, the bool exact parameter
 * line and the doubling loops that scale m/n are not visible in this
 * excerpt — confirm the scaling logic against the full source.
 */
6147 intel_compare_m_n(unsigned int m, unsigned int n,
6148 unsigned int m2, unsigned int n2,
6151 if (m == m2 && n == n2)
6154 if (exact || !m || !n || !m2 || !n2)
/* scaling below shifts into the sign bit only if M/N exceed INT_MAX */
6157 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
6164 } else if (n < n2) {
/* with the Ns aligned, fuzzily compare the scaled Ms */
6174 return intel_fuzzy_clock_check(m, m2);
6178 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
6179 const struct intel_link_m_n *m2_n2,
6182 return m_n->tu == m2_n2->tu &&
6183 intel_compare_m_n(m_n->data_m, m_n->data_n,
6184 m2_n2->data_m, m2_n2->data_n, exact) &&
6185 intel_compare_m_n(m_n->link_m, m_n->link_n,
6186 m2_n2->link_m, m2_n2->link_n, exact);
6190 intel_compare_infoframe(const union hdmi_infoframe *a,
6191 const union hdmi_infoframe *b)
6193 return memcmp(a, b, sizeof(*a)) == 0;
6197 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
6198 const struct drm_dp_vsc_sdp *b)
6200 return memcmp(a, b, sizeof(*a)) == 0;
/*
 * Log an infoframe mismatch: at KMS-debug level for a fastset check
 * (and only when KMS debugging is enabled), at error level otherwise,
 * dumping both the expected (@a) and found (@b) infoframes.
 * NOTE(review): the leading "static void" line and the if/else braces
 * selecting between the fastset and non-fastset paths are not visible
 * in this excerpt.
 */
6204 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
6205 bool fastset, const char *name,
6206 const union hdmi_infoframe *a,
6207 const union hdmi_infoframe *b)
/* fastset path: skip entirely unless KMS debug output is enabled */
6210 if (!drm_debug_enabled(DRM_UT_KMS))
6213 drm_dbg_kms(&dev_priv->drm,
6214 "fastset mismatch in %s infoframe\n", name);
6215 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6216 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
6217 drm_dbg_kms(&dev_priv->drm, "found:\n");
6218 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
/* non-fastset path: a mismatch is a real error */
6220 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
6221 drm_err(&dev_priv->drm, "expected:\n");
6222 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
6223 drm_err(&dev_priv->drm, "found:\n");
6224 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * Log a DP VSC SDP mismatch, mirroring pipe_config_infoframe_mismatch():
 * KMS-debug level for fastset checks, error level otherwise, dumping the
 * expected (@a) and found (@b) SDPs.
 * NOTE(review): the leading "static void" line and the if/else braces
 * selecting between the two paths are not visible in this excerpt.
 */
6229 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
6230 bool fastset, const char *name,
6231 const struct drm_dp_vsc_sdp *a,
6232 const struct drm_dp_vsc_sdp *b)
/* fastset path: skip entirely unless KMS debug output is enabled */
6235 if (!drm_debug_enabled(DRM_UT_KMS))
6238 drm_dbg_kms(&dev_priv->drm,
6239 "fastset mismatch in %s dp sdp\n", name);
6240 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6241 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
6242 drm_dbg_kms(&dev_priv->drm, "found:\n");
6243 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
/* non-fastset path: a mismatch is a real error */
6245 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
6246 drm_err(&dev_priv->drm, "expected:\n");
6247 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
6248 drm_err(&dev_priv->drm, "found:\n");
6249 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * printf-style mismatch reporter for pipe config comparison: logs the
 * formatted message at KMS-debug level for fastset checks and at error
 * level otherwise, tagged with the crtc and the mismatching field @name.
 * The __printf(4, 5) attribute enables format-string checking.
 * NOTE(review): the vaf.fmt/vaf.va assignments, the if/else braces and
 * the closing va_end() are not visible in this excerpt.
 */
6253 static void __printf(4, 5)
6254 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
6255 const char *name, const char *format, ...)
6257 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6258 struct va_format vaf;
6261 va_start(args, format);
/* %pV expands the captured va_format in the kernel's printf */
6266 drm_dbg_kms(&i915->drm,
6267 "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
6268 crtc->base.base.id, crtc->base.name, name, &vaf);
6270 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
6271 crtc->base.base.id, crtc->base.name, name, &vaf);
/*
 * Decide whether fastboot (skipping the initial modeset when the BIOS
 * config already matches) is in effect: an explicit module parameter
 * wins; otherwise it defaults on for display version 9+ and VLV/CHV,
 * and off elsewhere.
 * NOTE(review): the "return true"/"return false" lines after each
 * platform check are not visible in this excerpt.
 */
6276 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
/* i915.fastboot=-1 means "auto"; any other value is an explicit choice */
6278 if (dev_priv->params.fastboot != -1)
6279 return dev_priv->params.fastboot;
6281 /* Enable fastboot by default on Skylake and newer */
6282 if (DISPLAY_VER(dev_priv) >= 9)
6285 /* Enable fastboot by default on VLV and CHV */
6286 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6289 /* Disabled by default on all others */
6294 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
6295 const struct intel_crtc_state *pipe_config,
6298 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
6299 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
6302 bool fixup_inherited = fastset &&
6303 current_config->inherited && !pipe_config->inherited;
6305 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
6306 drm_dbg_kms(&dev_priv->drm,
6307 "initial modeset and fastboot not set\n");
6311 #define PIPE_CONF_CHECK_X(name) do { \
6312 if (current_config->name != pipe_config->name) { \
6313 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6314 "(expected 0x%08x, found 0x%08x)", \
6315 current_config->name, \
6316 pipe_config->name); \
6321 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
6322 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
6323 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6324 "(expected 0x%08x, found 0x%08x)", \
6325 current_config->name & (mask), \
6326 pipe_config->name & (mask)); \
6331 #define PIPE_CONF_CHECK_I(name) do { \
6332 if (current_config->name != pipe_config->name) { \
6333 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6334 "(expected %i, found %i)", \
6335 current_config->name, \
6336 pipe_config->name); \
6341 #define PIPE_CONF_CHECK_BOOL(name) do { \
6342 if (current_config->name != pipe_config->name) { \
6343 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6344 "(expected %s, found %s)", \
6345 yesno(current_config->name), \
6346 yesno(pipe_config->name)); \
6352 * Checks state where we only read out the enabling, but not the entire
6353 * state itself (like full infoframes or ELD for audio). These states
6354 * require a full modeset on bootup to fix up.
6356 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6357 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6358 PIPE_CONF_CHECK_BOOL(name); \
6360 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6361 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6362 yesno(current_config->name), \
6363 yesno(pipe_config->name)); \
6368 #define PIPE_CONF_CHECK_P(name) do { \
6369 if (current_config->name != pipe_config->name) { \
6370 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6371 "(expected %p, found %p)", \
6372 current_config->name, \
6373 pipe_config->name); \
6378 #define PIPE_CONF_CHECK_M_N(name) do { \
6379 if (!intel_compare_link_m_n(¤t_config->name, \
6380 &pipe_config->name,\
6382 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6383 "(expected tu %i data %i/%i link %i/%i, " \
6384 "found tu %i, data %i/%i link %i/%i)", \
6385 current_config->name.tu, \
6386 current_config->name.data_m, \
6387 current_config->name.data_n, \
6388 current_config->name.link_m, \
6389 current_config->name.link_n, \
6390 pipe_config->name.tu, \
6391 pipe_config->name.data_m, \
6392 pipe_config->name.data_n, \
6393 pipe_config->name.link_m, \
6394 pipe_config->name.link_n); \
6399 /* This is required for BDW+ where there is only one set of registers for
6400 * switching between high and low RR.
6401 * This macro can be used whenever a comparison has to be made between one
6402 * hw state and multiple sw state variables.
6404 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6405 if (!intel_compare_link_m_n(¤t_config->name, \
6406 &pipe_config->name, !fastset) && \
6407 !intel_compare_link_m_n(¤t_config->alt_name, \
6408 &pipe_config->name, !fastset)) { \
6409 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6410 "(expected tu %i data %i/%i link %i/%i, " \
6411 "or tu %i data %i/%i link %i/%i, " \
6412 "found tu %i, data %i/%i link %i/%i)", \
6413 current_config->name.tu, \
6414 current_config->name.data_m, \
6415 current_config->name.data_n, \
6416 current_config->name.link_m, \
6417 current_config->name.link_n, \
6418 current_config->alt_name.tu, \
6419 current_config->alt_name.data_m, \
6420 current_config->alt_name.data_n, \
6421 current_config->alt_name.link_m, \
6422 current_config->alt_name.link_n, \
6423 pipe_config->name.tu, \
6424 pipe_config->name.data_m, \
6425 pipe_config->name.data_n, \
6426 pipe_config->name.link_m, \
6427 pipe_config->name.link_n); \
6432 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6433 if ((current_config->name ^ pipe_config->name) & (mask)) { \
6434 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6435 "(%x) (expected %i, found %i)", \
6437 current_config->name & (mask), \
6438 pipe_config->name & (mask)); \
6443 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6444 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6445 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6446 "(expected %i, found %i)", \
6447 current_config->name, \
6448 pipe_config->name); \
6453 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6454 if (!intel_compare_infoframe(¤t_config->infoframes.name, \
6455 &pipe_config->infoframes.name)) { \
6456 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6457 ¤t_config->infoframes.name, \
6458 &pipe_config->infoframes.name); \
6463 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
6464 if (!current_config->has_psr && !pipe_config->has_psr && \
6465 !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \
6466 &pipe_config->infoframes.name)) { \
6467 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
6468 ¤t_config->infoframes.name, \
6469 &pipe_config->infoframes.name); \
6474 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
6475 if (current_config->name1 != pipe_config->name1) { \
6476 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
6477 "(expected %i, found %i, won't compare lut values)", \
6478 current_config->name1, \
6479 pipe_config->name1); \
6482 if (!intel_color_lut_equal(current_config->name2, \
6483 pipe_config->name2, pipe_config->name1, \
6485 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
6486 "hw_state doesn't match sw_state"); \
6492 #define PIPE_CONF_QUIRK(quirk) \
6493 ((current_config->quirks | pipe_config->quirks) & (quirk))
6495 PIPE_CONF_CHECK_I(cpu_transcoder);
6497 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6498 PIPE_CONF_CHECK_I(fdi_lanes);
6499 PIPE_CONF_CHECK_M_N(fdi_m_n);
6501 PIPE_CONF_CHECK_I(lane_count);
6502 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
6504 if (DISPLAY_VER(dev_priv) < 8) {
6505 PIPE_CONF_CHECK_M_N(dp_m_n);
6507 if (current_config->has_drrs)
6508 PIPE_CONF_CHECK_M_N(dp_m2_n2);
6510 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
6512 PIPE_CONF_CHECK_X(output_types);
6514 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
6515 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
6516 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
6517 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
6518 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
6519 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
6521 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
6522 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
6523 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
6524 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
6525 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
6526 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
6528 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
6529 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
6530 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
6531 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
6532 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
6533 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
6535 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
6536 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
6537 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
6538 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
6539 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
6540 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
6542 PIPE_CONF_CHECK_I(pixel_multiplier);
6544 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6545 DRM_MODE_FLAG_INTERLACE);
6547 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6548 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6549 DRM_MODE_FLAG_PHSYNC);
6550 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6551 DRM_MODE_FLAG_NHSYNC);
6552 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6553 DRM_MODE_FLAG_PVSYNC);
6554 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6555 DRM_MODE_FLAG_NVSYNC);
6558 PIPE_CONF_CHECK_I(output_format);
6559 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
6560 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
6561 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6562 PIPE_CONF_CHECK_BOOL(limited_color_range);
6564 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
6565 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
6566 PIPE_CONF_CHECK_BOOL(has_infoframe);
6567 PIPE_CONF_CHECK_BOOL(fec_enable);
6569 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
6571 PIPE_CONF_CHECK_X(gmch_pfit.control);
6572 /* pfit ratios are autocomputed by the hw on gen4+ */
6573 if (DISPLAY_VER(dev_priv) < 4)
6574 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
6575 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
6578 * Changing the EDP transcoder input mux
6579 * (A_ONOFF vs. A_ON) requires a full modeset.
6581 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
6584 PIPE_CONF_CHECK_I(pipe_src_w);
6585 PIPE_CONF_CHECK_I(pipe_src_h);
6587 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
6588 if (current_config->pch_pfit.enabled) {
6589 PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
6590 PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
6591 PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
6592 PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
6595 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
6596 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
6598 PIPE_CONF_CHECK_X(gamma_mode);
6599 if (IS_CHERRYVIEW(dev_priv))
6600 PIPE_CONF_CHECK_X(cgm_mode);
6602 PIPE_CONF_CHECK_X(csc_mode);
6603 PIPE_CONF_CHECK_BOOL(gamma_enable);
6604 PIPE_CONF_CHECK_BOOL(csc_enable);
6606 PIPE_CONF_CHECK_I(linetime);
6607 PIPE_CONF_CHECK_I(ips_linetime);
6609 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
6611 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
6613 if (current_config->active_planes) {
6614 PIPE_CONF_CHECK_BOOL(has_psr);
6615 PIPE_CONF_CHECK_BOOL(has_psr2);
6616 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
6617 PIPE_CONF_CHECK_I(dc3co_exitline);
6621 PIPE_CONF_CHECK_BOOL(double_wide);
6623 if (dev_priv->dpll.mgr) {
6624 PIPE_CONF_CHECK_P(shared_dpll);
6626 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
6627 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
6628 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
6629 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
6630 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6631 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
6632 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
6633 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
6634 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
6635 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
6636 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
6637 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
6638 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
6639 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
6640 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
6641 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
6642 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
6643 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
6644 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
6645 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
6646 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
6647 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
6648 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
6649 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
6650 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
6651 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
6652 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
6653 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
6654 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
6655 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
6656 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
6659 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
6660 PIPE_CONF_CHECK_X(dsi_pll.div);
6662 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
6663 PIPE_CONF_CHECK_I(pipe_bpp);
6665 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
6666 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
6667 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
6669 PIPE_CONF_CHECK_I(min_voltage_level);
6671 if (current_config->has_psr || pipe_config->has_psr)
6672 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
6673 ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
6675 PIPE_CONF_CHECK_X(infoframes.enable);
6677 PIPE_CONF_CHECK_X(infoframes.gcp);
6678 PIPE_CONF_CHECK_INFOFRAME(avi);
6679 PIPE_CONF_CHECK_INFOFRAME(spd);
6680 PIPE_CONF_CHECK_INFOFRAME(hdmi);
6681 PIPE_CONF_CHECK_INFOFRAME(drm);
6682 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
6684 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
6685 PIPE_CONF_CHECK_I(master_transcoder);
6686 PIPE_CONF_CHECK_BOOL(bigjoiner);
6687 PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
6688 PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
6690 PIPE_CONF_CHECK_I(dsc.compression_enable);
6691 PIPE_CONF_CHECK_I(dsc.dsc_split);
6692 PIPE_CONF_CHECK_I(dsc.compressed_bpp);
6694 PIPE_CONF_CHECK_BOOL(splitter.enable);
6695 PIPE_CONF_CHECK_I(splitter.link_count);
6696 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
6698 PIPE_CONF_CHECK_I(mst_master_transcoder);
6700 PIPE_CONF_CHECK_BOOL(vrr.enable);
6701 PIPE_CONF_CHECK_I(vrr.vmin);
6702 PIPE_CONF_CHECK_I(vrr.vmax);
6703 PIPE_CONF_CHECK_I(vrr.flipline);
6704 PIPE_CONF_CHECK_I(vrr.pipeline_full);
6705 PIPE_CONF_CHECK_I(vrr.guardband);
6707 #undef PIPE_CONF_CHECK_X
6708 #undef PIPE_CONF_CHECK_I
6709 #undef PIPE_CONF_CHECK_BOOL
6710 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
6711 #undef PIPE_CONF_CHECK_P
6712 #undef PIPE_CONF_CHECK_FLAGS
6713 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
6714 #undef PIPE_CONF_CHECK_COLOR_LUT
6715 #undef PIPE_CONF_QUIRK
/*
 * Sanity check for PCH-attached pipes: the dotclock implied by the FDI
 * link M/N values must (fuzzily) agree with the encoder's adjusted-mode
 * clock; otherwise emit a drm_WARN.
 */
6720 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
6721 const struct intel_crtc_state *pipe_config)
6723 if (pipe_config->has_pch_encoder) {
/* Dotclock derived from the FDI link frequency and M/N values. */
6724 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6725 &pipe_config->fdi_m_n);
6726 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
6729 * FDI already provided one idea for the dotclock.
6730 * Yell if the encoder disagrees.
6732 drm_WARN(&dev_priv->drm,
6733 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
6734 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6735 fdi_dotclock, dotclock);
/*
 * Read back the SKL+ (display ver >= 9) watermark and DDB allocation
 * hardware state for an active crtc and compare it against the computed
 * software state; any mismatch is reported with drm_err().
 */
6739 static void verify_wm_state(struct intel_crtc *crtc,
6740 struct intel_crtc_state *new_crtc_state)
6742 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Scratch container for the hardware readout (heap-allocated below). */
6743 struct skl_hw_state {
6744 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
6745 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
6746 struct skl_pipe_wm wm;
6748 const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
6749 int level, max_level = ilk_wm_max_level(dev_priv);
6750 struct intel_plane *plane;
6751 u8 hw_enabled_slices;
/* Only SKL+ hardware is checked, and only if the pipe is active. */
6753 if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
6756 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
/* Read watermark and DDB allocation state back from the hardware. */
6760 skl_pipe_wm_get_hw_state(crtc, &hw->wm)
6762 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
6764 hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
/* DBUF slice bookkeeping only exists on display ver >= 11. */
6766 if (DISPLAY_VER(dev_priv) >= 11 &&
6767 hw_enabled_slices != dev_priv->dbuf.enabled_slices)
6768 drm_err(&dev_priv->drm,
6769 "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
6770 dev_priv->dbuf.enabled_slices,
6773 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6774 const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
6775 const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Per-level watermarks: compare every level up to max_level. */
6778 for (level = 0; level <= max_level; level++) {
6779 hw_wm_level = &hw->wm.planes[plane->id].wm[level];
6780 sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
6782 if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
6785 drm_err(&dev_priv->drm,
6786 "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6787 plane->base.base.id, plane->base.name, level,
6788 sw_wm_level->enable,
6789 sw_wm_level->blocks,
6791 hw_wm_level->enable,
6792 hw_wm_level->blocks,
6793 hw_wm_level->lines);
/* Transition watermark. */
6796 hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
6797 sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
6799 if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6800 drm_err(&dev_priv->drm,
6801 "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6802 plane->base.base.id, plane->base.name,
6803 sw_wm_level->enable,
6804 sw_wm_level->blocks,
6806 hw_wm_level->enable,
6807 hw_wm_level->blocks,
6808 hw_wm_level->lines);
/* SAGV WM0 — only checked where the hw has dedicated SAGV watermarks. */
6811 hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
6812 sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
6814 if (HAS_HW_SAGV_WM(dev_priv) &&
6815 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6816 drm_err(&dev_priv->drm,
6817 "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6818 plane->base.base.id, plane->base.name,
6819 sw_wm_level->enable,
6820 sw_wm_level->blocks,
6822 hw_wm_level->enable,
6823 hw_wm_level->blocks,
6824 hw_wm_level->lines);
/* SAGV transition watermark. */
6827 hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
6828 sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
6830 if (HAS_HW_SAGV_WM(dev_priv) &&
6831 !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6832 drm_err(&dev_priv->drm,
6833 "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6834 plane->base.base.id, plane->base.name,
6835 sw_wm_level->enable,
6836 sw_wm_level->blocks,
6838 hw_wm_level->enable,
6839 hw_wm_level->blocks,
6840 hw_wm_level->lines);
/* DDB allocation extents for this plane. */
6844 hw_ddb_entry = &hw->ddb_y[plane->id];
6845 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
6847 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
6848 drm_err(&dev_priv->drm,
6849 "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
6850 plane->base.base.id, plane->base.name,
6851 sw_ddb_entry->start, sw_ddb_entry->end,
6852 hw_ddb_entry->start, hw_ddb_entry->end);
/*
 * For every connector in the atomic state that is bound to @crtc (or to
 * no crtc when @crtc is NULL), verify the connector state and check that
 * the atomic best_encoder matches the legacy connector->encoder link.
 */
6860 verify_connector_state(struct intel_atomic_state *state,
6861 struct intel_crtc *crtc)
6863 struct drm_connector *connector;
6864 struct drm_connector_state *new_conn_state;
6867 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
6868 struct drm_encoder *encoder = connector->encoder;
6869 struct intel_crtc_state *crtc_state = NULL;
/* Skip connectors that are not attached to the crtc being verified. */
6871 if (new_conn_state->crtc != &crtc->base)
6875 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6877 intel_connector_verify_state(crtc_state, new_conn_state);
6879 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
6880 "connector's atomic encoder doesn't match legacy encoder\n");
/*
 * Verify every encoder's software enabled state against the connectors
 * referencing it in the atomic state, and check detached encoders are
 * actually disabled in hardware via get_hw_state().
 */
6885 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
6887 struct intel_encoder *encoder;
6888 struct drm_connector *connector;
6889 struct drm_connector_state *old_conn_state, *new_conn_state;
6892 for_each_intel_encoder(&dev_priv->drm, encoder) {
6893 bool enabled = false, found = false;
6896 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
6897 encoder->base.base.id,
6898 encoder->base.name);
/* Scan old/new connector states for references to this encoder. */
6900 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
6901 new_conn_state, i) {
6902 if (old_conn_state->best_encoder == &encoder->base)
6905 if (new_conn_state->best_encoder != &encoder->base)
6907 found = enabled = true;
6909 I915_STATE_WARN(new_conn_state->crtc !=
6911 "connector's crtc doesn't match encoder crtc\n");
/* Encoder must be bound to a crtc iff some connector enables it. */
6917 I915_STATE_WARN(!!encoder->base.crtc != enabled,
6918 "encoder's enabled state mismatch "
6919 "(expected %i, found %i)\n",
6920 !!encoder->base.crtc, enabled);
6922 if (!encoder->base.crtc) {
/* Detached encoder: the hardware must report it inactive too. */
6925 active = encoder->get_hw_state(encoder, &pipe);
6926 I915_STATE_WARN(active,
6927 "encoder detached but still enabled on pipe %c.\n",
/*
 * Re-read the full crtc configuration from the hardware (reusing
 * @old_crtc_state as scratch storage) and compare it against the
 * committed software state; dump both configs on mismatch.
 */
6934 verify_crtc_state(struct intel_crtc *crtc,
6935 struct intel_crtc_state *old_crtc_state,
6936 struct intel_crtc_state *new_crtc_state)
6938 struct drm_device *dev = crtc->base.dev;
6939 struct drm_i915_private *dev_priv = to_i915(dev);
6940 struct intel_encoder *encoder;
6941 struct intel_crtc_state *pipe_config = old_crtc_state;
6942 struct drm_atomic_state *state = old_crtc_state->uapi.state;
6943 struct intel_crtc *master_crtc;
/* Reset old_crtc_state so it can hold the hardware readout. */
6945 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
6946 intel_crtc_free_hw_state(old_crtc_state);
6947 intel_crtc_state_reset(old_crtc_state, crtc);
6948 old_crtc_state->uapi.state = state;
6950 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
6953 pipe_config->hw.enable = new_crtc_state->hw.enable;
6955 intel_crtc_get_pipe_config(pipe_config);
6957 /* we keep both pipes enabled on 830 */
6958 if (IS_I830(dev_priv) && pipe_config->hw.active)
6959 pipe_config->hw.active = new_crtc_state->hw.active;
6961 I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
6962 "crtc active state doesn't match with hw state "
6963 "(expected %i, found %i)\n",
6964 new_crtc_state->hw.active, pipe_config->hw.active);
6966 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
6967 "transitional active state does not match atomic hw state "
6968 "(expected %i, found %i)\n",
6969 new_crtc_state->hw.active, crtc->active);
/* For bigjoiner configurations, the encoders hang off the master crtc. */
6971 master_crtc = intel_master_crtc(new_crtc_state);
6973 for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
6977 active = encoder->get_hw_state(encoder, &pipe);
6978 I915_STATE_WARN(active != new_crtc_state->hw.active,
6979 "[ENCODER:%i] active %i with crtc active %i\n",
6980 encoder->base.base.id, active,
6981 new_crtc_state->hw.active);
6983 I915_STATE_WARN(active && master_crtc->pipe != pipe,
6984 "Encoder connected to wrong pipe %c\n",
/* Let the encoder fill in its part of the hw readout. */
6988 intel_encoder_get_config(encoder, pipe_config);
6991 if (!new_crtc_state->hw.active)
6994 intel_pipe_config_sanity_check(dev_priv, pipe_config);
/* Strict (non-fastset) comparison between hw readout and sw state. */
6996 if (!intel_pipe_config_compare(new_crtc_state,
6997 pipe_config, false)) {
6998 I915_STATE_WARN(1, "pipe state doesn't match!\n");
6999 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
7000 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
/*
 * Assert that every plane in the atomic state is either a planar
 * (NV12 UV) slave or visible in its uapi state.
 */
7005 intel_verify_planes(struct intel_atomic_state *state)
7007 struct intel_plane *plane;
7008 const struct intel_plane_state *plane_state;
7011 for_each_new_intel_plane_in_state(state, plane,
7013 assert_plane(plane, plane_state->planar_slave ||
7014 plane_state->uapi.visible);
/*
 * Verify one shared DPLL: read its hardware state and cross-check it
 * against the software on/active_mask/pipe_mask tracking. When @crtc is
 * provided, also verify this crtc's membership in the PLL's masks.
 */
7018 verify_single_dpll_state(struct drm_i915_private *dev_priv,
7019 struct intel_shared_dpll *pll,
7020 struct intel_crtc *crtc,
7021 struct intel_crtc_state *new_crtc_state)
7023 struct intel_dpll_hw_state dpll_hw_state;
7027 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
7029 drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
7031 active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
/* Always-on PLLs are exempt from the on/off consistency checks. */
7033 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
7034 I915_STATE_WARN(!pll->on && pll->active_mask,
7035 "pll in active use but not on in sw tracking\n");
7036 I915_STATE_WARN(pll->on && !pll->active_mask,
7037 "pll is on but not used by any active pipe\n");
7038 I915_STATE_WARN(pll->on != active,
7039 "pll on state mismatch (expected %i, found %i)\n",
/* active_mask must be a subset of the reference pipe_mask. */
7044 I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
7045 "more active pll users than references: 0x%x vs 0x%x\n",
7046 pll->active_mask, pll->state.pipe_mask);
7051 pipe_mask = BIT(crtc->pipe);
7053 if (new_crtc_state->hw.active)
7054 I915_STATE_WARN(!(pll->active_mask & pipe_mask),
7055 "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
7056 pipe_name(crtc->pipe), pll->active_mask);
7058 I915_STATE_WARN(pll->active_mask & pipe_mask,
7059 "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
7060 pipe_name(crtc->pipe), pll->active_mask);
7062 I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
7063 "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
7064 pipe_mask, pll->state.pipe_mask);
/* Finally compare the full register-level hw state when the PLL is on. */
7066 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
7068 sizeof(dpll_hw_state)),
7069 "pll hw state mismatch\n");
/*
 * Verify the crtc's new shared DPLL (if any), and when the crtc switched
 * PLLs across this commit, check the old PLL no longer references it.
 */
7073 verify_shared_dpll_state(struct intel_crtc *crtc,
7074 struct intel_crtc_state *old_crtc_state,
7075 struct intel_crtc_state *new_crtc_state)
7077 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7079 if (new_crtc_state->shared_dpll)
7080 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
/* PLL changed: the old PLL must have dropped this pipe from its masks. */
7082 if (old_crtc_state->shared_dpll &&
7083 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
7084 u8 pipe_mask = BIT(crtc->pipe);
7085 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
7087 I915_STATE_WARN(pll->active_mask & pipe_mask,
7088 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
7089 pipe_name(crtc->pipe), pll->active_mask);
7090 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
7091 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
7092 pipe_name(crtc->pipe), pll->state.pipe_mask);
/*
 * Read back the MPLLB (SNPS PHY PLL) registers via the crtc's encoder
 * and compare each field against the computed software state, reporting
 * mismatches through pipe_config_mismatch().
 */
7097 verify_mpllb_state(struct intel_atomic_state *state,
7098 struct intel_crtc_state *new_crtc_state)
7100 struct drm_i915_private *i915 = to_i915(state->base.dev);
7101 struct intel_mpllb_state mpllb_hw_state = { 0 };
7102 struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
7103 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7104 struct intel_encoder *encoder;
7109 if (!new_crtc_state->hw.active)
7112 encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
7113 intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
/* Compare one named field of sw vs. hw MPLLB state. */
7115 #define MPLLB_CHECK(name) do { \
7116 if (mpllb_sw_state->name != mpllb_hw_state.name) { \
7117 pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
7118 "(expected 0x%08x, found 0x%08x)", \
7119 mpllb_sw_state->name, \
7120 mpllb_hw_state.name); \
7124 MPLLB_CHECK(mpllb_cp);
7125 MPLLB_CHECK(mpllb_div);
7126 MPLLB_CHECK(mpllb_div2);
7127 MPLLB_CHECK(mpllb_fracn1);
7128 MPLLB_CHECK(mpllb_fracn2);
7129 MPLLB_CHECK(mpllb_sscen);
7130 MPLLB_CHECK(mpllb_sscstep);
7133 * ref_control is handled by the hardware/firmware and never
7134 * programmed by the software, but the proper values are supplied
7135 * in the bspec for verification purposes.
7137 MPLLB_CHECK(ref_control);
/*
 * Top-level per-crtc state verification, run only after a full modeset
 * or a fastset (update_pipe): checks watermarks, connectors, crtc
 * config, shared DPLLs and MPLLB state.
 */
7143 intel_modeset_verify_crtc(struct intel_crtc *crtc,
7144 struct intel_atomic_state *state,
7145 struct intel_crtc_state *old_crtc_state,
7146 struct intel_crtc_state *new_crtc_state)
7148 if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
7151 verify_wm_state(crtc, new_crtc_state);
7152 verify_connector_state(state, crtc);
7153 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
7154 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
7155 verify_mpllb_state(state, new_crtc_state);
/*
 * Verify every shared DPLL without an associated crtc/crtc-state
 * (crtc == NULL skips the per-pipe membership checks).
 */
7159 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
7163 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
7164 verify_single_dpll_state(dev_priv,
7165 &dev_priv->dpll.shared_dplls[i],
/*
 * Verify global state not tied to any particular crtc: encoders,
 * crtc-less connectors, and disabled shared DPLLs.
 */
7170 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
7171 struct intel_atomic_state *state)
7173 verify_encoder_state(dev_priv, state);
7174 verify_connector_state(state, NULL);
7175 verify_disabled_dpll_state(dev_priv);
/*
 * Pull every crtc into @state and force a full modeset on the active
 * ones (including their connectors and planes). Returns 0 or a
 * negative errno from state acquisition.
 */
7178 int intel_modeset_all_pipes(struct intel_atomic_state *state)
7180 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7181 struct intel_crtc *crtc;
7184 * Add all pipes to the state, and force
7185 * a modeset on all the active ones.
7187 for_each_intel_crtc(&dev_priv->drm, crtc) {
7188 struct intel_crtc_state *crtc_state;
7191 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7192 if (IS_ERR(crtc_state))
7193 return PTR_ERR(crtc_state);
/* Inactive crtcs, and ones already doing a modeset, need no forcing. */
7195 if (!crtc_state->hw.active ||
7196 drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
7199 crtc_state->uapi.mode_changed = true;
7201 ret = drm_atomic_add_affected_connectors(&state->base,
7206 ret = intel_atomic_add_affected_planes(state, crtc);
/* Make sure all currently active planes get reprogrammed too. */
7210 crtc_state->update_planes |= crtc_state->active_planes;
/*
 * Update the crtc's vblank timestamping constants and scanline counter
 * offset from the (possibly VRR-adjusted) adjusted mode. Must track the
 * timings actually programmed into the hardware.
 */
7217 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
7219 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7220 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Local copy so VRR adjustments don't touch the committed state. */
7221 struct drm_display_mode adjusted_mode =
7222 crtc_state->hw.adjusted_mode;
7224 if (crtc_state->vrr.enable) {
7225 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
7226 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
7227 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
7228 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
7231 drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
7233 crtc->mode_flags = crtc_state->mode_flags;
7236 * The scanline counter increments at the leading edge of hsync.
7238 * On most platforms it starts counting from vtotal-1 on the
7239 * first active line. That means the scanline counter value is
7240 * always one less than what we would expect. Ie. just after
7241 * start of vblank, which also occurs at start of hsync (on the
7242 * last active line), the scanline counter will read vblank_start-1.
7244 * On gen2 the scanline counter starts counting from 1 instead
7245 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
7246 * to keep the value positive), instead of adding one.
7248 * On HSW+ the behaviour of the scanline counter depends on the output
7249 * type. For DP ports it behaves like most other platforms, but on HDMI
7250 * there's an extra 1 line difference. So we need to add two instead of
7253 * On VLV/CHV DSI the scanline counter would appear to increment
7254 * approx. 1/3 of a scanline before start of vblank. Unfortunately
7255 * that means we can't tell whether we're in vblank or not while
7256 * we're on that particular line. We must still set scanline_offset
7257 * to 1 so that the vblank timestamps come out correct when we query
7258 * the scanline counter from within the vblank interrupt handler.
7259 * However if queried just before the start of vblank we'll get an
7260 * answer that's slightly in the future.
7262 if (DISPLAY_VER(dev_priv) == 2) {
7265 vtotal = adjusted_mode.crtc_vtotal;
7266 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7269 crtc->scanline_offset = vtotal - 1;
7270 } else if (HAS_DDI(dev_priv) &&
7271 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
7272 crtc->scanline_offset = 2;
7274 crtc->scanline_offset = 1;
/*
 * Release the shared DPLL references of every crtc undergoing a full
 * modeset in this atomic state. No-op on platforms without dpll_funcs.
 */
7278 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
7280 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7281 struct intel_crtc_state *new_crtc_state;
7282 struct intel_crtc *crtc;
7285 if (!dev_priv->dpll_funcs)
7288 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7289 if (!intel_crtc_needs_modeset(new_crtc_state))
7292 intel_release_shared_dplls(state, crtc);
7297 * This implements the workaround described in the "notes" section of the mode
7298 * set sequence documentation. When going from no pipes or single pipe to
7299 * multiple pipes, and planes are enabled after the pipe, we need to wait at
7300 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
7302 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
7304 struct intel_crtc_state *crtc_state;
7305 struct intel_crtc *crtc;
7306 struct intel_crtc_state *first_crtc_state = NULL;
7307 struct intel_crtc_state *other_crtc_state = NULL;
7308 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
7311 /* look at all crtc's that are going to be enabled in during modeset */
7312 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7313 if (!crtc_state->hw.active ||
7314 !intel_crtc_needs_modeset(crtc_state))
7317 if (first_crtc_state) {
7318 other_crtc_state = crtc_state;
7321 first_crtc_state = crtc_state;
7322 first_pipe = crtc->pipe;
7326 /* No workaround needed? */
7327 if (!first_crtc_state)
7330 /* w/a possibly needed, check how many crtc's are already enabled. */
7331 for_each_intel_crtc(state->base.dev, crtc) {
7332 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7333 if (IS_ERR(crtc_state))
7334 return PTR_ERR(crtc_state);
/* Default: no workaround pipe for this crtc. */
7336 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
7338 if (!crtc_state->hw.active ||
7339 intel_crtc_needs_modeset(crtc_state))
7342 /* 2 or more enabled crtcs means no need for w/a */
7343 if (enabled_pipe != INVALID_PIPE)
7346 enabled_pipe = crtc->pipe;
/* Record which pipe each newly-enabled crtc must wait on. */
7349 if (enabled_pipe != INVALID_PIPE)
7350 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
7351 else if (other_crtc_state)
7352 other_crtc_state->hsw_workaround_pipe = first_pipe;
/*
 * Return the bitmask of active pipes, starting from the caller-supplied
 * mask and applying the activations/deactivations in @state.
 */
7357 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
7360 const struct intel_crtc_state *crtc_state;
7361 struct intel_crtc *crtc;
7364 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7365 if (crtc_state->hw.active)
7366 active_pipes |= BIT(crtc->pipe);
7368 active_pipes &= ~BIT(crtc->pipe);
7371 return active_pipes;
/*
 * Global checks run once per atomic commit that contains a modeset;
 * Haswell additionally needs the two-pipe plane-enable workaround.
 */
7374 static int intel_modeset_checks(struct intel_atomic_state *state)
7376 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7378 state->modeset = true;
7380 if (IS_HASWELL(dev_priv))
7381 return hsw_mode_set_planes_workaround(state);
/*
 * Downgrade a requested modeset to a fastset when the old and new
 * states compare equal under the fuzzy (fastset) comparison.
 */
7386 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
7387 struct intel_crtc_state *new_crtc_state)
7389 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
7392 new_crtc_state->uapi.mode_changed = false;
7393 new_crtc_state->update_pipe = true;
/*
 * For a fastset, carry over link M/N and DRRS state from the old crtc
 * state so we don't reprogram values the hardware is already using.
 */
7396 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
7397 struct intel_crtc_state *new_crtc_state)
7400 * If we're not doing the full modeset we want to
7401 * keep the current M/N values as they may be
7402 * sufficiently different to the computed values
7403 * to cause problems.
7405 * FIXME: should really copy more fuzzy state here
7407 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
7408 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
7409 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
7410 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
/*
 * Add every plane of @crtc whose id is set in @plane_ids_mask to the
 * atomic state. Returns 0 or a negative errno.
 */
7413 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
7414 struct intel_crtc *crtc,
7417 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7418 struct intel_plane *plane;
7420 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7421 struct intel_plane_state *plane_state;
7423 if ((plane_ids_mask & BIT(plane->id)) == 0)
7426 plane_state = intel_atomic_get_plane_state(state, plane);
7427 if (IS_ERR(plane_state))
7428 return PTR_ERR(plane_state);
/*
 * Add all planes that were or will be enabled on @crtc to the atomic
 * state (union of old and new enabled_planes masks).
 */
7434 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
7435 struct intel_crtc *crtc)
7437 const struct intel_crtc_state *old_crtc_state =
7438 intel_atomic_get_old_crtc_state(state, crtc);
7439 const struct intel_crtc_state *new_crtc_state =
7440 intel_atomic_get_new_crtc_state(state, crtc);
7442 return intel_crtc_add_planes_to_state(state, crtc,
7443 old_crtc_state->enabled_planes |
7444 new_crtc_state->enabled_planes);
/*
 * Platforms where the number of active planes feeds into per-plane
 * minimum cdclk ratios (see {hsw,vlv,ivb}_plane_ratio()).
 */
7447 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7449 /* See {hsw,vlv,ivb}_plane_ratio() */
7450 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7451 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7452 IS_IVYBRIDGE(dev_priv);
/*
 * Mirror the plane ids already in the state for @crtc onto the linked
 * bigjoiner crtc @other, pulling the matching planes into the state.
 */
7455 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
7456 struct intel_crtc *crtc,
7457 struct intel_crtc *other)
7459 const struct intel_plane_state *plane_state;
7460 struct intel_plane *plane;
7464 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7465 if (plane->pipe == crtc->pipe)
7466 plane_ids |= BIT(plane->id);
7469 return intel_crtc_add_planes_to_state(state, other, plane_ids);
/*
 * For every bigjoiner crtc in the state, add the corresponding planes
 * of its linked crtc so both pipes are updated together.
 */
7472 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
7474 const struct intel_crtc_state *crtc_state;
7475 struct intel_crtc *crtc;
7478 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7481 if (!crtc_state->bigjoiner)
7484 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
7485 crtc_state->bigjoiner_linked_crtc);
/*
 * Plane-level atomic checks: link NV12/bigjoiner planes, run each
 * plane's atomic check, and on platforms where active plane count
 * affects min cdclk, pull extra planes into the state as needed.
 */
7493 static int intel_atomic_check_planes(struct intel_atomic_state *state)
7495 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7496 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7497 struct intel_plane_state *plane_state;
7498 struct intel_plane *plane;
7499 struct intel_crtc *crtc;
7502 ret = icl_add_linked_planes(state);
7506 ret = intel_bigjoiner_add_affected_planes(state);
7510 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7511 ret = intel_plane_atomic_check(state, plane);
7513 drm_dbg_atomic(&dev_priv->drm,
7514 "[PLANE:%d:%s] atomic driver check failed\n",
7515 plane->base.base.id, plane->base.name);
7520 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7521 new_crtc_state, i) {
7522 u8 old_active_planes, new_active_planes;
7524 ret = icl_check_nv12_planes(new_crtc_state);
7529 * On some platforms the number of active planes affects
7530 * the planes' minimum cdclk calculation. Add such planes
7531 * to the state before we compute the minimum cdclk.
7533 if (!active_planes_affects_min_cdclk(dev_priv))
/* The cursor plane doesn't participate in the ratio calculation. */
7536 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
7537 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
7539 if (hweight8(old_active_planes) == hweight8(new_active_planes))
7542 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
/*
 * Run the per-crtc atomic check on every crtc in the state, logging
 * and propagating the first failure.
 */
7550 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
7552 struct intel_crtc_state *crtc_state;
7553 struct intel_crtc *crtc;
7556 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7557 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7560 ret = intel_crtc_atomic_check(state, crtc);
7562 drm_dbg_atomic(&i915->drm,
7563 "[CRTC:%d:%s] atomic driver check failed\n",
7564 crtc->base.base.id, crtc->base.name);
/*
 * Return true if any enabled crtc in the state using one of the given
 * cpu transcoders (bitmask) needs a full modeset.
 */
7572 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
7575 const struct intel_crtc_state *new_crtc_state;
7576 struct intel_crtc *crtc;
7579 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7580 if (new_crtc_state->hw.enable &&
7581 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
7582 intel_crtc_needs_modeset(new_crtc_state))
/*
 * Validate bigjoiner constraints for @crtc: ensure an old slave's master
 * is still claiming it, and when the new state enables bigjoiner, claim
 * the secondary crtc as slave (it must exist and not be enabled as a
 * normal crtc). Returns 0 or a negative errno.
 */
7589 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
7590 struct intel_crtc *crtc,
7591 struct intel_crtc_state *old_crtc_state,
7592 struct intel_crtc_state *new_crtc_state)
7594 struct drm_i915_private *i915 = to_i915(state->base.dev);
7595 struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
7596 struct intel_crtc *slave_crtc, *master_crtc;
7598 /* slave being enabled: is the master still claiming this crtc? */
7599 if (old_crtc_state->bigjoiner_slave) {
7601 master_crtc = old_crtc_state->bigjoiner_linked_crtc;
7602 master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
7603 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
7607 if (!new_crtc_state->bigjoiner)
/* The secondary (slave) pipe must exist on this platform/fusing. */
7610 slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
7612 drm_dbg_kms(&i915->drm,
7613 "[CRTC:%d:%s] Big joiner configuration requires "
7614 "CRTC + 1 to be used, doesn't exist\n",
7615 crtc->base.base.id, crtc->base.name);
7619 new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
7620 slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
7622 if (IS_ERR(slave_crtc_state))
7623 return PTR_ERR(slave_crtc_state);
7625 /* master being enabled, slave was already configured? */
7626 if (slave_crtc_state->uapi.enable)
7629 drm_dbg_kms(&i915->drm,
7630 "[CRTC:%d:%s] Used as slave for big joiner\n",
7631 slave_crtc->base.base.id, slave_crtc->base.name);
7633 return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
/* Slave is in use as a regular CRTC: bigjoiner cannot claim it. */
7636 drm_dbg_kms(&i915->drm,
7637 "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
7638 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
7639 slave_crtc->base.base.id, slave_crtc->base.name,
7640 master_crtc->base.base.id, master_crtc->base.name);
/*
 * Tear down the bigjoiner link between a master crtc state and its
 * slave: clear the bigjoiner flags/link on both, then rebuild the
 * slave's hw state from its own uapi state.
 */
7644 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
7645 struct intel_crtc_state *master_crtc_state)
7647 struct intel_crtc_state *slave_crtc_state =
7648 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
7650 slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
7651 slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
7652 slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
7653 intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
7657 * DOC: asynchronous flip implementation
7659 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7660 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7661 * Correspondingly, support is currently added for primary plane only.
7663 * Async flip can only change the plane surface address, so anything else
7664 * changing is rejected from the intel_atomic_check_async() function.
7665 * Once this check is cleared, flip done interrupt is enabled using
7666 * the intel_crtc_enable_flip_done() function.
7668 * As soon as the surface address register is written, flip done interrupt is
7669 * generated and the requested events are sent to the userspace in the interrupt
7670 * handler itself. The timestamp and sequence sent during the flip done event
7671 * correspond to the last vblank and have no relation to the actual time when
7672 * the flip done event was sent.
/*
 * Validate an async (tearing) flip request on @crtc: an async flip may only
 * change the plane surface address, so every other difference between the
 * old and new plane/CRTC state is rejected with a KMS debug message.
 */
7674 static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
7676 struct drm_i915_private *i915 = to_i915(state->base.dev);
7677 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7678 const struct intel_plane_state *new_plane_state, *old_plane_state;
7679 struct intel_plane *plane;
7682 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
7683 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
/* A full modeset can never be performed as part of an async flip. */
7685 if (intel_crtc_needs_modeset(new_crtc_state)) {
7686 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
7690 if (!new_crtc_state->hw.active) {
7691 drm_dbg_kms(&i915->drm, "CRTC inactive\n");
7694 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
7695 drm_dbg_kms(&i915->drm,
7696 "Active planes cannot be changed during async flip\n");
/* Check every plane of this CRTC against the async-flip restrictions. */
7700 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7701 new_plane_state, i) {
7702 if (plane->pipe != crtc->pipe)
7706 * TODO: Async flip is only supported through the page flip IOCTL
7707 * as of now. So support currently added for primary plane only.
7708 * Support for other planes on platforms which support
7709 * this (vlv/chv and icl+) should be added when async flip is
7710 * enabled in the atomic IOCTL path.
7712 if (!plane->async_flip)
7716 * FIXME: This check is kept generic for all platforms.
7717 * Need to verify this for all gen9 platforms to enable
7718 * this selectively if required.
/* Only the X/Y/Yf tiled modifiers are allowed; linear and CCS are not. */
7720 switch (new_plane_state->hw.fb->modifier) {
7721 case I915_FORMAT_MOD_X_TILED:
7722 case I915_FORMAT_MOD_Y_TILED:
7723 case I915_FORMAT_MOD_Yf_TILED:
7726 drm_dbg_kms(&i915->drm,
7727 "Linear memory/CCS does not support async flips\n");
7731 if (new_plane_state->hw.fb->format->num_planes > 1) {
7732 drm_dbg_kms(&i915->drm,
7733 "Planar formats not supported with async flips\n");
7737 if (old_plane_state->view.color_plane[0].mapping_stride !=
7738 new_plane_state->view.color_plane[0].mapping_stride) {
7739 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
7743 if (old_plane_state->hw.fb->modifier !=
7744 new_plane_state->hw.fb->modifier) {
7745 drm_dbg_kms(&i915->drm,
7746 "Framebuffer modifiers cannot be changed in async flip\n");
7750 if (old_plane_state->hw.fb->format !=
7751 new_plane_state->hw.fb->format) {
7752 drm_dbg_kms(&i915->drm,
7753 "Framebuffer format cannot be changed in async flip\n");
7757 if (old_plane_state->hw.rotation !=
7758 new_plane_state->hw.rotation) {
7759 drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
7763 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
7764 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
7765 drm_dbg_kms(&i915->drm,
7766 "Plane size/co-ordinates cannot be changed in async flip\n");
7770 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
7771 drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
7775 if (old_plane_state->hw.pixel_blend_mode !=
7776 new_plane_state->hw.pixel_blend_mode) {
7777 drm_dbg_kms(&i915->drm,
7778 "Pixel blend mode cannot be changed in async flip\n");
7782 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
7783 drm_dbg_kms(&i915->drm,
7784 "Color encoding cannot be changed in async flip\n");
7788 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
7789 drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
7793 /* plane decryption is allowed to change only in synchronous flips */
7794 if (old_plane_state->decrypt != new_plane_state->decrypt)
/*
 * For every CRTC in the state that is part of a bigjoiner pair, pull the
 * linked CRTC (plus its connectors and planes) into the atomic state so
 * both halves are checked/committed together, and tear down any stale
 * bigjoiner link on CRTCs undergoing a modeset (it may be re-established
 * later during the check phase).
 */
7801 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
7803 struct intel_crtc_state *crtc_state;
7804 struct intel_crtc *crtc;
7807 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7808 struct intel_crtc_state *linked_crtc_state;
7809 struct intel_crtc *linked_crtc;
7812 if (!crtc_state->bigjoiner)
7815 linked_crtc = crtc_state->bigjoiner_linked_crtc;
7816 linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
7817 if (IS_ERR(linked_crtc_state))
7818 return PTR_ERR(linked_crtc_state);
7820 if (!intel_crtc_needs_modeset(crtc_state))
/* A modeset on one half forces a modeset on the linked half too. */
7823 linked_crtc_state->uapi.mode_changed = true;
7825 ret = drm_atomic_add_affected_connectors(&state->base,
7826 &linked_crtc->base);
7830 ret = intel_atomic_add_affected_planes(state, linked_crtc);
7835 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7836 /* Kill old bigjoiner link, we may re-establish afterwards */
7837 if (intel_crtc_needs_modeset(crtc_state) &&
7838 crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
7839 kill_bigjoiner_slave(state, crtc_state);
7846 * intel_atomic_check - validate state object
 * @dev: drm device
7848 * @_state: state to validate
/*
 * Top-level atomic .atomic_check hook: validates the whole state object.
 * Runs the core DRM modeset checks, computes the full HW pipe config for
 * CRTCs needing a modeset, applies fastset/forced-modeset dependencies
 * (MST, port sync, bigjoiner), then performs the global checks
 * (watermarks, bandwidth, cdclk, planes, FBC, async flip).
 */
7850 static int intel_atomic_check(struct drm_device *dev,
7851 struct drm_atomic_state *_state)
7853 struct drm_i915_private *dev_priv = to_i915(dev);
7854 struct intel_atomic_state *state = to_intel_atomic_state(_state);
7855 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7856 struct intel_crtc *crtc;
7858 bool any_ms = false;
/* A change of the "inherited" (BIOS-takeover) flag forces a modeset. */
7860 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7861 new_crtc_state, i) {
7862 if (new_crtc_state->inherited != old_crtc_state->inherited)
7863 new_crtc_state->uapi.mode_changed = true;
7866 intel_vrr_check_modeset(state);
7868 ret = drm_atomic_helper_check_modeset(dev, &state->base);
7872 ret = intel_bigjoiner_add_affected_crtcs(state);
7876 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7877 new_crtc_state, i) {
7878 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7880 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
7885 if (!new_crtc_state->uapi.enable) {
7886 if (!new_crtc_state->bigjoiner_slave) {
7887 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
7893 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
7897 ret = intel_modeset_pipe_config(state, new_crtc_state);
7901 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
7907 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7908 new_crtc_state, i) {
7909 if (!intel_crtc_needs_modeset(new_crtc_state))
7912 ret = intel_modeset_pipe_config_late(new_crtc_state);
7916 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
7920 * Check if fastset is allowed by external dependencies like other
7921 * pipes and transcoders.
7923 * Right now it only forces a fullmodeset when the MST master
7924 * transcoder did not change but the pipe of the master transcoder
7925 * needs a fullmodeset so all slaves also need to do a fullmodeset or
7926 * in case of port synced crtcs, if one of the synced crtcs
7927 * needs a full modeset, all other synced crtcs should be
7928 * forced a full modeset.
7930 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7931 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7934 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7935 enum transcoder master = new_crtc_state->mst_master_transcoder;
7937 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7938 new_crtc_state->uapi.mode_changed = true;
7939 new_crtc_state->update_pipe = false;
7943 if (is_trans_port_sync_mode(new_crtc_state)) {
7944 u8 trans = new_crtc_state->sync_mode_slaves_mask;
7946 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7947 trans |= BIT(new_crtc_state->master_transcoder);
7949 if (intel_cpu_transcoders_need_modeset(state, trans)) {
7950 new_crtc_state->uapi.mode_changed = true;
7951 new_crtc_state->update_pipe = false;
7955 if (new_crtc_state->bigjoiner) {
7956 struct intel_crtc_state *linked_crtc_state =
7957 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
7959 if (intel_crtc_needs_modeset(linked_crtc_state)) {
7960 new_crtc_state->uapi.mode_changed = true;
7961 new_crtc_state->update_pipe = false;
7966 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7967 new_crtc_state, i) {
7968 if (intel_crtc_needs_modeset(new_crtc_state)) {
7973 if (!new_crtc_state->update_pipe)
7976 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
7979 if (any_ms && !check_digital_port_conflicts(state)) {
7980 drm_dbg_kms(&dev_priv->drm,
7981 "rejecting conflicting digital port configuration\n");
7986 ret = drm_dp_mst_atomic_check(&state->base);
7990 ret = intel_atomic_check_planes(state);
7994 ret = intel_compute_global_watermarks(state);
7998 ret = intel_bw_atomic_check(state);
8002 ret = intel_cdclk_atomic_check(state, &any_ms);
8006 if (intel_any_crtc_needs_modeset(state))
8010 ret = intel_modeset_checks(state);
8014 ret = intel_modeset_calc_cdclk(state);
8018 intel_modeset_clear_plls(state);
8021 ret = intel_atomic_check_crtcs(state);
8025 ret = intel_fbc_atomic_check(state);
/* Async flips get the extra restrictions checked per CRTC. */
8029 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8030 new_crtc_state, i) {
8031 if (new_crtc_state->uapi.async_flip) {
8032 ret = intel_atomic_check_async(state, crtc);
8037 if (!intel_crtc_needs_modeset(new_crtc_state) &&
8038 !new_crtc_state->update_pipe)
8041 intel_dump_pipe_config(new_crtc_state, state,
8042 intel_crtc_needs_modeset(new_crtc_state) ?
8043 "[modeset]" : "[fastset]");
/* -EDEADLK means the drm core will retry with relocked state. */
8049 if (ret == -EDEADLK)
8053 * FIXME would probably be nice to know which crtc specifically
8054 * caused the failure, in cases where we can pinpoint it.
8056 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8058 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
/*
 * Prepare the state for commit: pin/prepare all planes via the DRM helper
 * and pre-build a DSB (display state buffer) for every CRTC whose commit
 * will touch the pipe (modeset, fastset or color management change).
 */
8063 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
8065 struct intel_crtc_state *crtc_state;
8066 struct intel_crtc *crtc;
8069 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
8073 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8074 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8076 if (mode_changed || crtc_state->update_pipe ||
8077 crtc_state->uapi.color_mgmt_changed) {
8078 intel_dsb_prepare(crtc_state);
/*
 * Enable FIFO underrun reporting for @crtc on both the CPU side and, when
 * a PCH encoder is present, the corresponding PCH transcoder.
 * On DISPLAY_VER 2 the CPU side is only armed when planes are active
 * (gen2 reports spurious underruns with all planes disabled).
 */
8085 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
8086 struct intel_crtc_state *crtc_state)
8088 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8090 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
8091 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8093 if (crtc_state->has_pch_encoder) {
8094 enum pipe pch_transcoder =
8095 intel_crtc_pch_transcoder(crtc);
8097 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
/*
 * Program the pipe-level state that a fastset (flip without full modeset)
 * must still refresh: pipe source size, panel fitter, linetime watermark
 * and the pipe chicken register.
 */
8101 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
8102 const struct intel_crtc_state *new_crtc_state)
8104 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
8105 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8108 * Update pipe size and adjust fitter if needed: the reason for this is
8109 * that in compute_mode_changes we check the native mode (not the pfit
8110 * mode) to see if we can flip rather than do a full mode set. In the
8111 * fastboot case, we'll flip, but if we don't update the pipesrc and
8112 * pfit state, we'll end up with a big fb scanned out into the wrong
8115 intel_set_pipe_src_size(new_crtc_state);
8117 /* on skylake this is done by detaching scalers */
8118 if (DISPLAY_VER(dev_priv) >= 9) {
8119 if (new_crtc_state->pch_pfit.enabled)
8120 skl_pfit_enable(new_crtc_state);
8121 } else if (HAS_PCH_SPLIT(dev_priv)) {
8122 if (new_crtc_state->pch_pfit.enabled)
8123 ilk_pfit_enable(new_crtc_state);
8124 else if (old_crtc_state->pch_pfit.enabled)
8125 ilk_pfit_disable(old_crtc_state);
8129 * The register is supposedly single buffered so perhaps
8130 * not 100% correct to do this here. But SKL+ calculate
8131 * this based on the adjust pixel rate so pfit changes do
8132 * affect it and so it must be updated for fastsets.
8133 * HSW/BDW only really need this here for fastboot, after
8134 * that the value should not change without a full modeset.
8136 if (DISPLAY_VER(dev_priv) >= 9 ||
8137 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8138 hsw_set_linetime_wm(new_crtc_state);
8140 if (DISPLAY_VER(dev_priv) >= 11)
8141 icl_set_pipe_chicken(new_crtc_state);
/*
 * Per-CRTC pipe programming that must happen before the planes are
 * written during a commit: color management, pipe misc, fastset state,
 * PSR2 manual tracking and the pre-plane watermark update.
 */
8144 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
8145 struct intel_crtc *crtc)
8147 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8148 const struct intel_crtc_state *old_crtc_state =
8149 intel_atomic_get_old_crtc_state(state, crtc);
8150 const struct intel_crtc_state *new_crtc_state =
8151 intel_atomic_get_new_crtc_state(state, crtc);
8152 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8155 * During modesets pipe configuration was programmed as the
8159 if (new_crtc_state->uapi.color_mgmt_changed ||
8160 new_crtc_state->update_pipe)
8161 intel_color_commit(new_crtc_state);
8163 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
8164 bdw_set_pipemisc(new_crtc_state);
8166 if (new_crtc_state->update_pipe)
8167 intel_pipe_fastset(old_crtc_state, new_crtc_state);
8170 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
8172 intel_atomic_update_watermarks(state, crtc);
/*
 * Per-CRTC pipe programming that must happen after the planes are
 * written: currently only detaching the unused skl+ scalers.
 */
8175 static void commit_pipe_post_planes(struct intel_atomic_state *state,
8176 struct intel_crtc *crtc)
8178 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8179 const struct intel_crtc_state *new_crtc_state =
8180 intel_atomic_get_new_crtc_state(state, crtc);
8183 * Disable the scaler(s) after the plane(s) so that we don't
8184 * get a catastrophic underrun even if the two operations
8185 * end up happening in two different frames.
8187 if (DISPLAY_VER(dev_priv) >= 9 &&
8188 !intel_crtc_needs_modeset(new_crtc_state))
8189 skl_detach_scalers(new_crtc_state);
/*
 * Enable a CRTC that needs a full modeset: program the active timings,
 * call the platform crtc_enable hook and re-enable pipe CRC once vblanks
 * work again. No-op for fastsets and for bigjoiner slaves' CRC.
 */
8192 static void intel_enable_crtc(struct intel_atomic_state *state,
8193 struct intel_crtc *crtc)
8195 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8196 const struct intel_crtc_state *new_crtc_state =
8197 intel_atomic_get_new_crtc_state(state, crtc);
8199 if (!intel_crtc_needs_modeset(new_crtc_state))
8202 intel_crtc_update_active_timings(new_crtc_state);
8204 dev_priv->display->crtc_enable(state, crtc);
8206 if (new_crtc_state->bigjoiner_slave)
8209 /* vblanks work again, re-enable pipe CRC. */
8210 intel_crtc_enable_pipe_crc(crtc);
/*
 * Perform the plane/pipe update for one CRTC inside the commit: load LUTs,
 * run the pre-plane hooks, then do the vblank-evaded critical section
 * (pre-planes, plane arming, post-planes) between pipe_update_start/end.
 */
8213 static void intel_update_crtc(struct intel_atomic_state *state,
8214 struct intel_crtc *crtc)
8216 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8217 const struct intel_crtc_state *old_crtc_state =
8218 intel_atomic_get_old_crtc_state(state, crtc);
8219 struct intel_crtc_state *new_crtc_state =
8220 intel_atomic_get_new_crtc_state(state, crtc);
8221 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8224 if (new_crtc_state->preload_luts &&
8225 (new_crtc_state->uapi.color_mgmt_changed ||
8226 new_crtc_state->update_pipe))
8227 intel_color_load_luts(new_crtc_state);
8229 intel_pre_plane_update(state, crtc);
8231 if (new_crtc_state->update_pipe)
8232 intel_encoders_update_pipe(state, crtc);
8235 intel_fbc_update(state, crtc);
8237 intel_update_planes_on_crtc(state, crtc);
8239 /* Perform vblank evasion around commit operation */
8240 intel_pipe_update_start(new_crtc_state);
8242 commit_pipe_pre_planes(state, crtc);
/* skl+ and pre-skl platforms arm their plane registers differently. */
8244 if (DISPLAY_VER(dev_priv) >= 9)
8245 skl_arm_planes_on_crtc(state, crtc);
8247 i9xx_arm_planes_on_crtc(state, crtc);
8249 commit_pipe_post_planes(state, crtc);
8251 intel_pipe_update_end(new_crtc_state);
8254 * We usually enable FIFO underrun interrupts as part of the
8255 * CRTC enable sequence during modesets. But when we inherit a
8256 * valid pipe configuration from the BIOS we need to take care
8257 * of enabling them on the CRTC's first fastset.
8259 if (new_crtc_state->update_pipe && !modeset &&
8260 old_crtc_state->inherited)
8261 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * Fully disable one CRTC as part of a modeset: stop pipe CRC first (to
 * avoid racing vblank off), call the platform crtc_disable hook, then
 * tear down FBC and the shared DPLL, and reprogram initial watermarks
 * on non-GMCH platforms when the pipe stays off.
 */
8264 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
8265 struct intel_crtc_state *old_crtc_state,
8266 struct intel_crtc_state *new_crtc_state,
8267 struct intel_crtc *crtc)
8269 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8272 * We need to disable pipe CRC before disabling the pipe,
8273 * or we race against vblank off.
8275 intel_crtc_disable_pipe_crc(crtc);
8277 dev_priv->display->crtc_disable(state, crtc);
8278 crtc->active = false;
8279 intel_fbc_disable(crtc);
8280 intel_disable_shared_dpll(old_crtc_state);
8282 /* FIXME unify this for all platforms */
8283 if (!new_crtc_state->hw.active &&
8284 !HAS_GMCH(dev_priv))
8285 intel_initial_watermarks(state, crtc);
/*
 * Disable every CRTC that needs a modeset, in dependency order: planes
 * first, then port-sync/MST/bigjoiner slaves, and finally everything
 * else (masters) — tracked via the @handled pipe bitmask.
 */
8288 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
8290 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8291 struct intel_crtc *crtc;
8295 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8296 new_crtc_state, i) {
8297 if (!intel_crtc_needs_modeset(new_crtc_state))
8300 if (!old_crtc_state->hw.active)
8303 intel_pre_plane_update(state, crtc);
8304 intel_crtc_disable_planes(state, crtc);
8307 /* Only disable port sync and MST slaves */
8308 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8309 new_crtc_state, i) {
8310 if (!intel_crtc_needs_modeset(new_crtc_state))
8313 if (!old_crtc_state->hw.active)
8316 /* In case of Transcoder port Sync master slave CRTCs can be
8317 * assigned in any order and we need to make sure that
8318 * slave CRTCs are disabled first and then master CRTC since
8319 * Slave vblanks are masked till Master Vblanks.
8321 if (!is_trans_port_sync_slave(old_crtc_state) &&
8322 !intel_dp_mst_is_slave_trans(old_crtc_state) &&
8323 !old_crtc_state->bigjoiner_slave)
8326 intel_old_crtc_state_disables(state, old_crtc_state,
8327 new_crtc_state, crtc);
8328 handled |= BIT(crtc->pipe);
8331 /* Disable everything else left on */
8332 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8333 new_crtc_state, i) {
8334 if (!intel_crtc_needs_modeset(new_crtc_state) ||
8335 (handled & BIT(crtc->pipe)))
8338 if (!old_crtc_state->hw.active)
8341 intel_old_crtc_state_disables(state, old_crtc_state,
8342 new_crtc_state, crtc);
/*
 * Default (non-skl) commit_modeset_enables hook: enable and update every
 * CRTC that will be active, in state order — no DDB ordering constraints
 * exist on these platforms.
 */
8346 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8348 struct intel_crtc_state *new_crtc_state;
8349 struct intel_crtc *crtc;
8352 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8353 if (!new_crtc_state->hw.active)
8356 intel_enable_crtc(state, crtc);
8357 intel_update_crtc(state, crtc);
/*
 * skl+ commit_modeset_enables hook: enables/updates pipes in an order
 * that guarantees their DDB (data buffer) allocations never overlap
 * between steps, waiting a vblank where needed. Fastset pipes go first,
 * then independent modeset pipes, then dependent ones (MST slaves, port
 * sync masters, bigjoiner), and finally the plane updates for all newly
 * enabled pipes.
 */
8361 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
8363 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8364 struct intel_crtc *crtc;
8365 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8366 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
8367 u8 update_pipes = 0, modeset_pipes = 0;
8370 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8371 enum pipe pipe = crtc->pipe;
8373 if (!new_crtc_state->hw.active)
8376 /* ignore allocations for crtc's that have been turned off. */
8377 if (!intel_crtc_needs_modeset(new_crtc_state)) {
8378 entries[pipe] = old_crtc_state->wm.skl.ddb;
8379 update_pipes |= BIT(pipe);
8381 modeset_pipes |= BIT(pipe);
8386 * Whenever the number of active pipes changes, we need to make sure we
8387 * update the pipes in the right order so that their ddb allocations
8388 * never overlap with each other between CRTC updates. Otherwise we'll
8389 * cause pipe underruns and other bad stuff.
8391 * So first lets enable all pipes that do not need a fullmodeset as
8392 * those don't have any external dependency.
8394 while (update_pipes) {
8395 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8396 new_crtc_state, i) {
8397 enum pipe pipe = crtc->pipe;
8399 if ((update_pipes & BIT(pipe)) == 0)
/* Defer this pipe while its new DDB overlaps a not-yet-updated pipe. */
8402 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8403 entries, I915_MAX_PIPES, pipe))
8406 entries[pipe] = new_crtc_state->wm.skl.ddb;
8407 update_pipes &= ~BIT(pipe);
8409 intel_update_crtc(state, crtc);
8412 * If this is an already active pipe, its DDB changed,
8413 * and this isn't the last pipe that needs updating
8414 * then we need to wait for a vblank to pass for the
8415 * new ddb allocation to take effect.
8417 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
8418 &old_crtc_state->wm.skl.ddb) &&
8419 (update_pipes | modeset_pipes))
8420 intel_crtc_wait_for_next_vblank(crtc);
8424 update_pipes = modeset_pipes;
8427 * Enable all pipes that need a modeset and do not depend on other
8430 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8431 enum pipe pipe = crtc->pipe;
8433 if ((modeset_pipes & BIT(pipe)) == 0)
8436 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
8437 is_trans_port_sync_master(new_crtc_state) ||
8438 (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
8441 modeset_pipes &= ~BIT(pipe);
8443 intel_enable_crtc(state, crtc);
8447 * Then we enable all remaining pipes that depend on other
8448 * pipes: MST slaves and port sync masters, big joiner master
8450 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8451 enum pipe pipe = crtc->pipe;
8453 if ((modeset_pipes & BIT(pipe)) == 0)
8456 modeset_pipes &= ~BIT(pipe);
8458 intel_enable_crtc(state, crtc);
8462 * Finally we do the plane updates/etc. for all pipes that got enabled.
8464 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8465 enum pipe pipe = crtc->pipe;
8467 if ((update_pipes & BIT(pipe)) == 0)
8470 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
8471 entries, I915_MAX_PIPES, pipe));
8473 entries[pipe] = new_crtc_state->wm.skl.ddb;
8474 update_pipes &= ~BIT(pipe);
8476 intel_update_crtc(state, crtc);
/* By now every pipe must have been handled. */
8479 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
8480 drm_WARN_ON(&dev_priv->drm, update_pipes);
/*
 * Drop the reference on every atomic state queued on the lock-free
 * free_list (populated from the sw_fence release notification).
 */
8483 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8485 struct intel_atomic_state *state, *next;
8486 struct llist_node *freed;
8488 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8489 llist_for_each_entry_safe(state, next, freed, freed)
8490 drm_atomic_state_put(&state->base);
/* Workqueue wrapper around intel_atomic_helper_free_state(). */
8493 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8495 struct drm_i915_private *dev_priv =
8496 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8498 intel_atomic_helper_free_state(dev_priv);
/*
 * Block until the commit_ready sw_fence signals, while also waking up if
 * a GPU reset (I915_RESET_MODESET) is flagged — waiting on both queues
 * simultaneously avoids deadlocking a commit against a pending reset.
 */
8501 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
8503 struct wait_queue_entry wait_fence, wait_reset;
8504 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
8506 init_wait_entry(&wait_fence, 0);
8507 init_wait_entry(&wait_reset, 0);
8509 prepare_to_wait(&intel_state->commit_ready.wait,
8510 &wait_fence, TASK_UNINTERRUPTIBLE);
8511 prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8512 I915_RESET_MODESET),
8513 &wait_reset, TASK_UNINTERRUPTIBLE);
8516 if (i915_sw_fence_done(&intel_state->commit_ready) ||
8517 test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
8522 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
8523 finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
8524 I915_RESET_MODESET),
/* Release the DSB buffers stashed on the old CRTC states of @state. */
8528 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
8530 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8531 struct intel_crtc *crtc;
8534 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8536 intel_dsb_cleanup(old_crtc_state);
/*
 * Deferred cleanup worker for a finished commit: free DSBs, let the DRM
 * helpers clean up planes and the commit object, drop the state reference
 * and flush any states queued for freeing.
 */
8539 static void intel_atomic_cleanup_work(struct work_struct *work)
8541 struct intel_atomic_state *state =
8542 container_of(work, struct intel_atomic_state, base.commit_work);
8543 struct drm_i915_private *i915 = to_i915(state->base.dev);
8545 intel_cleanup_dsbs(state);
8546 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
8547 drm_atomic_helper_commit_cleanup_done(&state->base);
8548 drm_atomic_state_put(&state->base);
8550 intel_atomic_helper_free_state(i915);
/*
 * For every plane whose fb carries a render-compression clear-color
 * plane, read the 8-byte native clear color value out of the GEM object
 * into plane_state->ccval so it can be programmed during the commit.
 */
8553 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
8555 struct drm_i915_private *i915 = to_i915(state->base.dev);
8556 struct intel_plane *plane;
8557 struct intel_plane_state *plane_state;
8560 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8561 struct drm_framebuffer *fb = plane_state->hw.fb;
8568 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
8573 * The layout of the fast clear color value expected by HW
8574 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
8575 * - 4 x 4 bytes per-channel value
8576 * (in surface type specific float/int format provided by the fb user)
8577 * - 8 bytes native color value used by the display
8578 * (converted/written by GPU during a fast clear operation using the
8579 * above per-channel values)
8581 * The commit's FB prepare hook already ensured that FB obj is pinned and the
8582 * caller made sure that the object is synced wrt. the related color clear value
/* +16 skips the 4x4-byte per-channel values to reach the native color. */
8585 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
8586 fb->offsets[cc_plane] + 16,
8587 &plane_state->ccval,
8588 sizeof(plane_state->ccval));
8589 /* The above could only fail if the FB obj has an unexpected backing store type. */
8590 drm_WARN_ON(&i915->drm, ret);
/*
 * The tail of an atomic commit — runs after the state has been swapped
 * (either inline for blocking commits or from a worker for nonblocking
 * ones). Performs the full HW sequence: disables, enables/fastsets,
 * flip-done handling, post-vblank optimizations, power-domain and
 * runtime-pm release, and queues the deferred cleanup worker.
 */
8594 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
8596 struct drm_device *dev = state->base.dev;
8597 struct drm_i915_private *dev_priv = to_i915(dev);
8598 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
8599 struct intel_crtc *crtc;
8600 u64 put_domains[I915_MAX_PIPES] = {};
8601 intel_wakeref_t wakeref = 0;
8604 intel_atomic_commit_fence_wait(state);
8606 drm_atomic_helper_wait_for_dependencies(&state->base);
8609 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
8611 intel_atomic_prepare_plane_clear_colors(state);
/* Grab the power domains needed by each pipe before touching the HW. */
8613 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8614 new_crtc_state, i) {
8615 if (intel_crtc_needs_modeset(new_crtc_state) ||
8616 new_crtc_state->update_pipe) {
8618 put_domains[crtc->pipe] =
8619 modeset_get_crtc_power_domains(new_crtc_state);
8623 intel_commit_modeset_disables(state);
8625 /* FIXME: Eventually get rid of our crtc->config pointer */
8626 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8627 crtc->config = new_crtc_state;
8629 if (state->modeset) {
8630 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
8632 intel_set_cdclk_pre_plane_update(state);
8634 intel_modeset_verify_disabled(dev_priv, state);
8637 intel_sagv_pre_plane_update(state);
8639 /* Complete the events for pipes that have now been disabled */
8640 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8641 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8643 /* Complete events for now disable pipes here. */
8644 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
8645 spin_lock_irq(&dev->event_lock);
8646 drm_crtc_send_vblank_event(&crtc->base,
8647 new_crtc_state->uapi.event);
8648 spin_unlock_irq(&dev->event_lock);
8650 new_crtc_state->uapi.event = NULL;
8654 intel_encoders_update_prepare(state);
8656 intel_dbuf_pre_plane_update(state);
/* Arm flip-done interrupts before the flips are actually armed. */
8658 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8659 if (new_crtc_state->uapi.async_flip)
8660 intel_crtc_enable_flip_done(state, crtc);
8663 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
8664 dev_priv->display->commit_modeset_enables(state);
8666 intel_encoders_update_complete(state);
8669 intel_set_cdclk_post_plane_update(state);
8671 intel_wait_for_vblank_workers(state);
8673 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
8674 * already, but still need the state for the delayed optimization. To
8676 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
8677 * - schedule that vblank worker _before_ calling hw_done
8678 * - at the start of commit_tail, cancel it _synchronously
8679 * - switch over to the vblank wait helper in the core after that since
8680 * we don't need our special handling any more.
8682 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
8684 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8685 if (new_crtc_state->uapi.async_flip)
8686 intel_crtc_disable_flip_done(state, crtc);
8690 * Now that the vblank has passed, we can go ahead and program the
8691 * optimal watermarks on platforms that need two-step watermark
8694 * TODO: Move this (and other cleanup) to an async worker eventually.
8696 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8697 new_crtc_state, i) {
8699 * Gen2 reports pipe underruns whenever all planes are disabled.
8700 * So re-enable underrun reporting after some planes get enabled.
8702 * We do this before .optimize_watermarks() so that we have a
8703 * chance of catching underruns with the intermediate watermarks
8704 * vs. the new plane configuration.
8706 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
8707 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8709 intel_optimize_watermarks(state, crtc);
8712 intel_dbuf_post_plane_update(state);
8713 intel_psr_post_plane_update(state);
8715 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8716 intel_post_plane_update(state, crtc);
8718 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
8720 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
8723 * DSB cleanup is done in cleanup_work aligning with framebuffer
8724 * cleanup. So copy and reset the dsb structure to sync with
8725 * commit_done and later do dsb cleanup in cleanup_work.
8727 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
8730 /* Underruns don't always raise interrupts, so check manually */
8731 intel_check_cpu_fifo_underruns(dev_priv);
8732 intel_check_pch_fifo_underruns(dev_priv);
8735 intel_verify_planes(state);
8737 intel_sagv_post_plane_update(state);
8739 drm_atomic_helper_commit_hw_done(&state->base);
8741 if (state->modeset) {
8742 /* As one of the primary mmio accessors, KMS has a high
8743 * likelihood of triggering bugs in unclaimed access. After we
8744 * finish modesetting, see if an error has been flagged, and if
8745 * so enable debugging for the next modeset - and hope we catch
8748 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
8749 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
8751 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8754 * Defer the cleanup of the old state to a separate worker to not
8755 * impede the current task (userspace for blocking modesets) that
8756 * are executed inline. For out-of-line asynchronous modesets/flips,
8757 * deferring to a new worker seems overkill, but we would place a
8758 * schedule point (cond_resched()) here anyway to keep latencies
8761 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
8762 queue_work(system_highpri_wq, &state->base.commit_work);
/* Workqueue wrapper: runs the commit tail for nonblocking commits. */
8765 static void intel_atomic_commit_work(struct work_struct *work)
8767 struct intel_atomic_state *state =
8768 container_of(work, struct intel_atomic_state, base.commit_work);
8770 intel_atomic_commit_tail(state);
/*
 * i915_sw_fence notification callback for the commit_ready fence.
 * FENCE_COMPLETE needs no action (the commit worker does blocking waits);
 * on release the state is queued on the atomic_helper free_list so the
 * free worker can drop its reference.
 */
8774 intel_atomic_commit_ready(struct i915_sw_fence *fence,
8775 enum i915_sw_fence_notify notify)
8777 struct intel_atomic_state *state =
8778 container_of(fence, struct intel_atomic_state, commit_ready);
8781 case FENCE_COMPLETE:
8782 /* we do blocking waits in the worker, nothing to do here */
8786 struct intel_atomic_helper *helper =
8787 &to_i915(state->base.dev)->atomic_helper;
/* llist_add returns true when the list was empty — kick the worker. */
8789 if (llist_add(&state->freed, &helper->free_list))
8790 schedule_work(&helper->free_work);
/*
 * Move frontbuffer tracking bits from each plane's old fb to its new fb
 * for every plane in the state.
 */
8798 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
8800 struct intel_plane_state *old_plane_state, *new_plane_state;
8801 struct intel_plane *plane;
8804 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
8806 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
8807 to_intel_frontbuffer(new_plane_state->hw.fb),
8808 plane->frontbuffer_bit);
/*
 * Top-level atomic .atomic_commit hook: takes a runtime-pm wakeref,
 * prepares planes/DSBs, sets up and swaps the state, then either queues
 * the commit tail on the modeset/flip workqueue (nonblocking) or runs it
 * inline (blocking).
 */
8811 static int intel_atomic_commit(struct drm_device *dev,
8812 struct drm_atomic_state *_state,
8815 struct intel_atomic_state *state = to_intel_atomic_state(_state);
8816 struct drm_i915_private *dev_priv = to_i915(dev);
8819 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
8821 drm_atomic_state_get(&state->base);
8822 i915_sw_fence_init(&state->commit_ready,
8823 intel_atomic_commit_ready);
8826 * The intel_legacy_cursor_update() fast path takes care
8827 * of avoiding the vblank waits for simple cursor
8828 * movement and flips. For cursor on/off and size changes,
8829 * we want to perform the vblank waits so that watermark
8830 * updates happen during the correct frames. Gen9+ have
8831 * double buffered watermarks and so shouldn't need this.
8833 * Unset state->legacy_cursor_update before the call to
8834 * drm_atomic_helper_setup_commit() because otherwise
8835 * drm_atomic_helper_wait_for_flip_done() is a noop and
8836 * we get FIFO underruns because we didn't wait
8839 * FIXME doing watermarks and fb cleanup from a vblank worker
8840 * (assuming we had any) would solve these problems.
8842 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
8843 struct intel_crtc_state *new_crtc_state;
8844 struct intel_crtc *crtc;
8847 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8848 if (new_crtc_state->wm.need_postvbl_update ||
8849 new_crtc_state->update_wm_post)
8850 state->base.legacy_cursor_update = false;
8853 ret = intel_atomic_prepare_commit(state);
8855 drm_dbg_atomic(&dev_priv->drm,
8856 "Preparing state failed with %i\n", ret);
8857 i915_sw_fence_commit(&state->commit_ready);
8858 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8862 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
8864 ret = drm_atomic_helper_swap_state(&state->base, true);
8866 intel_atomic_swap_global_state(state);
/* Error path: release fence, DSBs, planes and the wakeref. */
8869 struct intel_crtc_state *new_crtc_state;
8870 struct intel_crtc *crtc;
8873 i915_sw_fence_commit(&state->commit_ready);
8875 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
8876 intel_dsb_cleanup(new_crtc_state);
8878 drm_atomic_helper_cleanup_planes(dev, &state->base);
8879 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
8882 intel_shared_dpll_swap_state(state);
8883 intel_atomic_track_fbs(state);
8885 drm_atomic_state_get(&state->base);
8886 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
8888 i915_sw_fence_commit(&state->commit_ready);
/* Modesets and plain flips go to separate queues; blocking runs inline. */
8889 if (nonblock && state->modeset) {
8890 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
8891 } else if (nonblock) {
8892 queue_work(dev_priv->flip_wq, &state->base.commit_work);
8895 flush_workqueue(dev_priv->modeset_wq);
8896 intel_atomic_commit_tail(state);
8903 * intel_plane_destroy - destroy a plane
8904 * @plane: plane to destroy
8906 * Common destruction function for all types of planes (primary, cursor,
8909 void intel_plane_destroy(struct drm_plane *plane)
8911 drm_plane_cleanup(plane);
/* intel_plane embeds drm_plane, so freeing the container frees both. */
8912 kfree(to_intel_plane(plane));
/*
 * Set each plane's possible_crtcs mask to exactly the CRTC of its pipe;
 * on i915 a plane is tied to a single pipe.
 */
8915 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
8917 struct intel_plane *plane;
8919 for_each_intel_plane(&dev_priv->drm, plane) {
8920 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
8923 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
/*
 * DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID: translate a userspace CRTC id
 * into the hardware pipe enum, written back into the ioctl argument.
 * NOTE(review): the not-found error return after drm_crtc_find() is
 * elided from this view.
 */
8928 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
8929 struct drm_file *file)
8931 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8932 struct drm_crtc *drmmode_crtc;
8933 struct intel_crtc *crtc;
8935 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
8939 crtc = to_intel_crtc(drmmode_crtc);
8940 pipe_from_crtc_id->pipe = crtc->pipe;
/*
 * Build the bitmask of encoders that may be active on the same CRTC as
 * @encoder at the same time (cloning), per encoders_cloneable().
 */
8947 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8948 struct drm_device *dev = encoder->base.dev;
8948 struct intel_encoder *source_encoder;
8949 u32 possible_clones = 0;
8951 for_each_intel_encoder(dev, source_encoder) {
8952 if (encoders_cloneable(encoder, source_encoder))
8953 possible_clones |= drm_encoder_mask(&source_encoder->base);
8956 return possible_clones;
/*
 * Convert an encoder's pipe_mask (hardware pipes it can drive) into a
 * drm_crtc mask suitable for encoder->base.possible_crtcs.
 */
8959 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
8961 struct drm_device *dev = encoder->base.dev;
8962 struct intel_crtc *crtc;
8963 u32 possible_crtcs = 0;
8965 for_each_intel_crtc(dev, crtc) {
8966 if (encoder->pipe_mask & BIT(crtc->pipe))
8967 possible_crtcs |= drm_crtc_mask(&crtc->base);
8970 return possible_crtcs;
/*
 * Whether eDP port A exists on this Ironlake-class platform: mobile only,
 * DP_A strap must be latched, and on Ironlake the eDP-A fuse must not be
 * set.  NOTE(review): return statements are elided from this view.
 */
8973 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
8975 if (!IS_MOBILE(dev_priv))
8978 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
8981 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
/*
 * Whether a CRT (VGA) connector should be registered on DDI platforms
 * (HSW/BDW): ruled out on display ver >= 9, ULT parts, fused-off LPT-H,
 * when DDI E is consumed by DDI A's 4 lanes, or when the VBT says no.
 * NOTE(review): return statements are elided from this view.
 */
8987 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
8989 if (DISPLAY_VER(dev_priv) >= 9)
8992 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
8995 if (HAS_PCH_LPT_H(dev_priv) &&
8996 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
8999 /* DDI E can't be used if DDI A requires 4 lanes */
9000 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
9003 if (!dev_priv->vbt.int_crt_support)
/*
 * Register all display outputs (encoders/connectors) for this device.
 *
 * One branch per platform family, newest first: DDI-based platforms
 * (DG2, ADL, TGL, ICL, GLK/BXT, SKL+, HSW/BDW), PCH-split (ILK..),
 * VLV/CHV, and the pre-PCH generations down to gen2.  Afterwards the
 * possible_crtcs/possible_clones masks are filled in for every encoder.
 * NOTE(review): extraction dropped some lines (braces, declarations,
 * a few detect checks) from this view.
 */
9009 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
9011 struct intel_encoder *encoder;
9012 bool dpd_is_edp = false;
9014 intel_pps_unlock_regs_wa(dev_priv);
9016 if (!HAS_DISPLAY(dev_priv))
/* Platform dispatch: each branch registers the ports that exist on that SKU. */
9019 if (IS_DG2(dev_priv)) {
9020 intel_ddi_init(dev_priv, PORT_A);
9021 intel_ddi_init(dev_priv, PORT_B);
9022 intel_ddi_init(dev_priv, PORT_C);
9023 intel_ddi_init(dev_priv, PORT_D_XELPD);
9024 } else if (IS_ALDERLAKE_P(dev_priv)) {
9025 intel_ddi_init(dev_priv, PORT_A);
9026 intel_ddi_init(dev_priv, PORT_B);
9027 intel_ddi_init(dev_priv, PORT_TC1);
9028 intel_ddi_init(dev_priv, PORT_TC2);
9029 intel_ddi_init(dev_priv, PORT_TC3);
9030 intel_ddi_init(dev_priv, PORT_TC4);
9031 icl_dsi_init(dev_priv);
9032 } else if (IS_ALDERLAKE_S(dev_priv)) {
9033 intel_ddi_init(dev_priv, PORT_A);
9034 intel_ddi_init(dev_priv, PORT_TC1);
9035 intel_ddi_init(dev_priv, PORT_TC2);
9036 intel_ddi_init(dev_priv, PORT_TC3);
9037 intel_ddi_init(dev_priv, PORT_TC4);
9038 } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
9039 intel_ddi_init(dev_priv, PORT_A);
9040 intel_ddi_init(dev_priv, PORT_B);
9041 intel_ddi_init(dev_priv, PORT_TC1);
9042 intel_ddi_init(dev_priv, PORT_TC2);
9043 } else if (DISPLAY_VER(dev_priv) >= 12) {
9044 intel_ddi_init(dev_priv, PORT_A);
9045 intel_ddi_init(dev_priv, PORT_B);
9046 intel_ddi_init(dev_priv, PORT_TC1);
9047 intel_ddi_init(dev_priv, PORT_TC2);
9048 intel_ddi_init(dev_priv, PORT_TC3);
9049 intel_ddi_init(dev_priv, PORT_TC4);
9050 intel_ddi_init(dev_priv, PORT_TC5);
9051 intel_ddi_init(dev_priv, PORT_TC6);
9052 icl_dsi_init(dev_priv);
9053 } else if (IS_JSL_EHL(dev_priv)) {
9054 intel_ddi_init(dev_priv, PORT_A);
9055 intel_ddi_init(dev_priv, PORT_B);
9056 intel_ddi_init(dev_priv, PORT_C);
9057 intel_ddi_init(dev_priv, PORT_D);
9058 icl_dsi_init(dev_priv);
9059 } else if (DISPLAY_VER(dev_priv) == 11) {
9060 intel_ddi_init(dev_priv, PORT_A);
9061 intel_ddi_init(dev_priv, PORT_B);
9062 intel_ddi_init(dev_priv, PORT_C);
9063 intel_ddi_init(dev_priv, PORT_D);
9064 intel_ddi_init(dev_priv, PORT_E);
9065 intel_ddi_init(dev_priv, PORT_F);
9066 icl_dsi_init(dev_priv);
9067 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
9068 intel_ddi_init(dev_priv, PORT_A);
9069 intel_ddi_init(dev_priv, PORT_B);
9070 intel_ddi_init(dev_priv, PORT_C);
9071 vlv_dsi_init(dev_priv);
9072 } else if (DISPLAY_VER(dev_priv) >= 9) {
9073 intel_ddi_init(dev_priv, PORT_A);
9074 intel_ddi_init(dev_priv, PORT_B);
9075 intel_ddi_init(dev_priv, PORT_C);
9076 intel_ddi_init(dev_priv, PORT_D);
9077 intel_ddi_init(dev_priv, PORT_E);
9078 } else if (HAS_DDI(dev_priv)) {
9081 if (intel_ddi_crt_present(dev_priv))
9082 intel_crt_init(dev_priv);
9084 /* Haswell uses DDI functions to detect digital outputs. */
9085 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
9087 intel_ddi_init(dev_priv, PORT_A);
9089 found = intel_de_read(dev_priv, SFUSE_STRAP);
9090 if (found & SFUSE_STRAP_DDIB_DETECTED)
9091 intel_ddi_init(dev_priv, PORT_B);
9092 if (found & SFUSE_STRAP_DDIC_DETECTED)
9093 intel_ddi_init(dev_priv, PORT_C);
9094 if (found & SFUSE_STRAP_DDID_DETECTED)
9095 intel_ddi_init(dev_priv, PORT_D);
9096 if (found & SFUSE_STRAP_DDIF_DETECTED)
9097 intel_ddi_init(dev_priv, PORT_F);
9098 } else if (HAS_PCH_SPLIT(dev_priv)) {
9102 * intel_edp_init_connector() depends on this completing first,
9103 * to prevent the registration of both eDP and LVDS and the
9104 * incorrect sharing of the PPS.
9106 intel_lvds_init(dev_priv);
9107 intel_crt_init(dev_priv);
9109 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
9111 if (ilk_has_edp_a(dev_priv))
9112 g4x_dp_init(dev_priv, DP_A, PORT_A);
9114 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
9115 /* PCH SDVOB multiplex with HDMIB */
9116 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
9118 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
9119 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
9120 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
9123 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
9124 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
9126 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
9127 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
9129 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
9130 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
9132 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
9133 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
9134 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
9135 bool has_edp, has_port;
9137 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
9138 intel_crt_init(dev_priv);
9141 * The DP_DETECTED bit is the latched state of the DDC
9142 * SDA pin at boot. However since eDP doesn't require DDC
9143 * (no way to plug in a DP->HDMI dongle) the DDC pins for
9144 * eDP ports may have been muxed to an alternate function.
9145 * Thus we can't rely on the DP_DETECTED bit alone to detect
9146 * eDP ports. Consult the VBT as well as DP_DETECTED to
9149 * Sadly the straps seem to be missing sometimes even for HDMI
9150 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
9151 * and VBT for the presence of the port. Additionally we can't
9152 * trust the port type the VBT declares as we've seen at least
9153 * HDMI ports that the VBT claim are DP or eDP.
9155 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
9156 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
9157 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
9158 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
9159 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
9160 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
9162 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
9163 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
9164 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
9165 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
9166 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
9167 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
9169 if (IS_CHERRYVIEW(dev_priv)) {
9171 * eDP not supported on port D,
9172 * so no need to worry about it
9174 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
9175 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
9176 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
9177 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
9178 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
9181 vlv_dsi_init(dev_priv);
9182 } else if (IS_PINEVIEW(dev_priv)) {
9183 intel_lvds_init(dev_priv);
9184 intel_crt_init(dev_priv);
9185 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
9188 if (IS_MOBILE(dev_priv))
9189 intel_lvds_init(dev_priv);
9191 intel_crt_init(dev_priv);
9193 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9194 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
9195 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
9196 if (!found && IS_G4X(dev_priv)) {
9197 drm_dbg_kms(&dev_priv->drm,
9198 "probing HDMI on SDVOB\n");
9199 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
9202 if (!found && IS_G4X(dev_priv))
9203 g4x_dp_init(dev_priv, DP_B, PORT_B);
9206 /* Before G4X SDVOC doesn't have its own detect register */
9208 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9209 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
9210 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
9213 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
9215 if (IS_G4X(dev_priv)) {
9216 drm_dbg_kms(&dev_priv->drm,
9217 "probing HDMI on SDVOC\n");
9218 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
9220 if (IS_G4X(dev_priv))
9221 g4x_dp_init(dev_priv, DP_C, PORT_C);
9224 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
9225 g4x_dp_init(dev_priv, DP_D, PORT_D);
9227 if (SUPPORTS_TV(dev_priv))
9228 intel_tv_init(dev_priv);
9229 } else if (DISPLAY_VER(dev_priv) == 2) {
9230 if (IS_I85X(dev_priv))
9231 intel_lvds_init(dev_priv);
9233 intel_crt_init(dev_priv);
9234 intel_dvo_init(dev_priv);
/* Now that all encoders exist, compute their CRTC and clone masks. */
9237 for_each_intel_encoder(&dev_priv->drm, encoder) {
9238 encoder->base.possible_crtcs =
9239 intel_encoder_possible_crtcs(encoder);
9240 encoder->base.possible_clones =
9241 intel_encoder_possible_clones(encoder);
9244 intel_init_pch_refclk(dev_priv);
9246 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
/*
 * i915 implementation of drm_mode_config_funcs.mode_valid: reject modes
 * whose flags or timings the transcoder cannot handle.  Limits scale with
 * display version.  NOTE(review): some return statements and the
 * htotal/vtotal max assignments are elided from this view.
 */
9249 static enum drm_mode_status
9250 intel_mode_valid(struct drm_device *dev,
9251 const struct drm_display_mode *mode)
9253 struct drm_i915_private *dev_priv = to_i915(dev);
9254 int hdisplay_max, htotal_max;
9255 int vdisplay_max, vtotal_max;
9258 * Can't reject DBLSCAN here because Xorg ddxen can add piles
9259 * of DBLSCAN modes to the output's mode list when they detect
9260 * the scaling mode property on the connector. And they don't
9261 * ask the kernel to validate those modes in any way until
9262 * modeset time at which point the client gets a protocol error.
9263 * So in order to not upset those clients we silently ignore the
9264 * DBLSCAN flag on such connectors. For other connectors we will
9265 * reject modes with the DBLSCAN flag in encoder->compute_config().
9266 * And we always reject DBLSCAN modes in connector->mode_valid()
9267 * as we never want such modes on the connector's mode list.
/* Unsupported mode flags are rejected outright. */
9270 if (mode->vscan > 1)
9271 return MODE_NO_VSCAN;
9273 if (mode->flags & DRM_MODE_FLAG_HSKEW)
9274 return MODE_H_ILLEGAL;
9276 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
9277 DRM_MODE_FLAG_NCSYNC |
9278 DRM_MODE_FLAG_PCSYNC))
9281 if (mode->flags & (DRM_MODE_FLAG_BCAST |
9282 DRM_MODE_FLAG_PIXMUX |
9283 DRM_MODE_FLAG_CLKDIV2))
9286 /* Transcoder timing limits */
9287 if (DISPLAY_VER(dev_priv) >= 11) {
9288 hdisplay_max = 16384;
9289 vdisplay_max = 8192;
9292 } else if (DISPLAY_VER(dev_priv) >= 9 ||
9293 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
9294 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
9295 vdisplay_max = 4096;
9298 } else if (DISPLAY_VER(dev_priv) >= 3) {
9299 hdisplay_max = 4096;
9300 vdisplay_max = 4096;
9304 hdisplay_max = 2048;
9305 vdisplay_max = 2048;
9310 if (mode->hdisplay > hdisplay_max ||
9311 mode->hsync_start > htotal_max ||
9312 mode->hsync_end > htotal_max ||
9313 mode->htotal > htotal_max)
9314 return MODE_H_ILLEGAL;
9316 if (mode->vdisplay > vdisplay_max ||
9317 mode->vsync_start > vtotal_max ||
9318 mode->vsync_end > vtotal_max ||
9319 mode->vtotal > vtotal_max)
9320 return MODE_V_ILLEGAL;
/* Minimum active/blanking sizes differ before and after display ver 5. */
9322 if (DISPLAY_VER(dev_priv) >= 5) {
9323 if (mode->hdisplay < 64 ||
9324 mode->htotal - mode->hdisplay < 32)
9325 return MODE_H_ILLEGAL;
9327 if (mode->vtotal - mode->vdisplay < 5)
9328 return MODE_V_ILLEGAL;
9330 if (mode->htotal - mode->hdisplay < 32)
9331 return MODE_H_ILLEGAL;
9333 if (mode->vtotal - mode->vdisplay < 3)
9334 return MODE_V_ILLEGAL;
9338 * Cantiga+ cannot handle modes with a hsync front porch of 0.
9339 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
9341 if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
9342 mode->hsync_start == mode->hdisplay)
9343 return MODE_H_ILLEGAL;
/*
 * Reject modes larger than the maximum plane size, so every advertised
 * mode can host a fullscreen plane.  @bigjoiner doubles the width limit
 * on display ver >= 11 (via the left shift below).
 */
9348 enum drm_mode_status
9349 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9350 const struct drm_display_mode *mode,
9353 int plane_width_max, plane_height_max;
9356 * intel_mode_valid() should be
9357 * sufficient on older platforms.
9359 if (DISPLAY_VER(dev_priv) < 9)
9363 * Most people will probably want a fullscreen
9364 * plane so let's not advertize modes that are
9367 if (DISPLAY_VER(dev_priv) >= 11) {
9368 plane_width_max = 5120 << bigjoiner;
9369 plane_height_max = 4320;
9371 plane_width_max = 5120;
9372 plane_height_max = 4096;
9375 if (mode->hdisplay > plane_width_max)
9376 return MODE_H_ILLEGAL;
9378 if (mode->vdisplay > plane_height_max)
9379 return MODE_V_ILLEGAL;
/* drm_mode_config_funcs vtable: routes DRM core callbacks to i915. */
9384 static const struct drm_mode_config_funcs intel_mode_funcs = {
9385 .fb_create = intel_user_framebuffer_create,
9386 .get_format_info = intel_fb_get_format_info,
9387 .output_poll_changed = intel_fbdev_output_poll_changed,
9388 .mode_valid = intel_mode_valid,
9389 .atomic_check = intel_atomic_check,
9390 .atomic_commit = intel_atomic_commit,
9391 .atomic_state_alloc = intel_atomic_state_alloc,
9392 .atomic_state_clear = intel_atomic_state_clear,
9393 .atomic_state_free = intel_atomic_state_free,
/* Display hooks for display ver >= 9 (Skylake and later). */
9396 static const struct drm_i915_display_funcs skl_display_funcs = {
9397 .get_pipe_config = hsw_get_pipe_config,
9398 .crtc_enable = hsw_crtc_enable,
9399 .crtc_disable = hsw_crtc_disable,
9400 .commit_modeset_enables = skl_commit_modeset_enables,
9401 .get_initial_plane_config = skl_get_initial_plane_config,
/* Display hooks for pre-gen9 DDI platforms (Haswell/Broadwell). */
9404 static const struct drm_i915_display_funcs ddi_display_funcs = {
9405 .get_pipe_config = hsw_get_pipe_config,
9406 .crtc_enable = hsw_crtc_enable,
9407 .crtc_disable = hsw_crtc_disable,
9408 .commit_modeset_enables = intel_commit_modeset_enables,
9409 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for PCH-split platforms (Ironlake-era). */
9412 static const struct drm_i915_display_funcs pch_split_display_funcs = {
9413 .get_pipe_config = ilk_get_pipe_config,
9414 .crtc_enable = ilk_crtc_enable,
9415 .crtc_disable = ilk_crtc_disable,
9416 .commit_modeset_enables = intel_commit_modeset_enables,
9417 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for Valleyview/Cherryview. */
9420 static const struct drm_i915_display_funcs vlv_display_funcs = {
9421 .get_pipe_config = i9xx_get_pipe_config,
9422 .crtc_enable = valleyview_crtc_enable,
9423 .crtc_disable = i9xx_crtc_disable,
9424 .commit_modeset_enables = intel_commit_modeset_enables,
9425 .get_initial_plane_config = i9xx_get_initial_plane_config,
/* Display hooks for the remaining legacy i9xx platforms. */
9428 static const struct drm_i915_display_funcs i9xx_display_funcs = {
9429 .get_pipe_config = i9xx_get_pipe_config,
9430 .crtc_enable = i9xx_crtc_enable,
9431 .crtc_disable = i9xx_crtc_disable,
9432 .commit_modeset_enables = intel_commit_modeset_enables,
9433 .get_initial_plane_config = i9xx_get_initial_plane_config,
9437 * intel_init_display_hooks - initialize the display modesetting hooks
9438 * @dev_priv: device private
9440 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
9442 if (!HAS_DISPLAY(dev_priv))
9445 intel_init_cdclk_hooks(dev_priv);
9446 intel_audio_hooks_init(dev_priv);
9448 intel_dpll_init_clock_hook(dev_priv);
/* Pick the per-platform display vtable, newest family first. */
9450 if (DISPLAY_VER(dev_priv) >= 9) {
9451 dev_priv->display = &skl_display_funcs;
9452 } else if (HAS_DDI(dev_priv)) {
9453 dev_priv->display = &ddi_display_funcs;
9454 } else if (HAS_PCH_SPLIT(dev_priv)) {
9455 dev_priv->display = &pch_split_display_funcs;
9456 } else if (IS_CHERRYVIEW(dev_priv) ||
9457 IS_VALLEYVIEW(dev_priv)) {
9458 dev_priv->display = &vlv_display_funcs;
9460 dev_priv->display = &i9xx_display_funcs;
9463 intel_fdi_init_hook(dev_priv);
/*
 * Read the current CDCLK configuration from hardware and seed the
 * software cdclk state (logical == actual == hw) with it.
 */
9466 void intel_modeset_init_hw(struct drm_i915_private *i915)
9468 struct intel_cdclk_state *cdclk_state;
9470 if (!HAS_DISPLAY(i915))
9473 cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
9475 intel_update_cdclk(i915);
9476 intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
9477 cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
/*
 * Pull every CRTC and plane into @state so a full watermark
 * recalculation sees them all; active CRTCs keep the "inherited" flag
 * to avoid forcing a full modeset.  Returns 0 or a -errno from state
 * acquisition.
 */
9480 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
9482 struct drm_plane *plane;
9483 struct intel_crtc *crtc;
9485 for_each_intel_crtc(state->dev, crtc) {
9486 struct intel_crtc_state *crtc_state;
9488 crtc_state = intel_atomic_get_crtc_state(state, crtc);
9489 if (IS_ERR(crtc_state))
9490 return PTR_ERR(crtc_state);
9492 if (crtc_state->hw.active) {
9494 * Preserve the inherited flag to avoid
9495 * taking the full modeset path.
9497 crtc_state->inherited = true;
9501 drm_for_each_plane(plane, state->dev) {
9502 struct drm_plane_state *plane_state;
9504 plane_state = drm_atomic_get_plane_state(state, plane);
9505 if (IS_ERR(plane_state))
9506 return PTR_ERR(plane_state);
9513 * Calculate what we think the watermarks should be for the state we've read
9514 * out of the hardware and then immediately program those watermarks so that
9515 * we ensure the hardware settings match our internal state.
9517 * We can calculate what we think WM's should be by creating a duplicate of the
9518 * current state (which was constructed during hardware readout) and running it
9519 * through the atomic check code to calculate new watermark values in the
9522 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
9524 struct drm_atomic_state *state;
9525 struct intel_atomic_state *intel_state;
9526 struct intel_crtc *crtc;
9527 struct intel_crtc_state *crtc_state;
9528 struct drm_modeset_acquire_ctx ctx;
9532 /* Only supported on platforms that use atomic watermark design */
9533 if (!dev_priv->wm_disp->optimize_watermarks)
9536 state = drm_atomic_state_alloc(&dev_priv->drm);
9537 if (drm_WARN_ON(&dev_priv->drm, !state))
9540 intel_state = to_intel_atomic_state(state);
9542 drm_modeset_acquire_init(&ctx, 0);
/* NOTE(review): the retry label for the -EDEADLK backoff loop below is
 * elided from this view. */
9545 state->acquire_ctx = &ctx;
9548 * Hardware readout is the only time we don't want to calculate
9549 * intermediate watermarks (since we don't trust the current
9552 if (!HAS_GMCH(dev_priv))
9553 intel_state->skip_intermediate_wm = true;
9555 ret = sanitize_watermarks_add_affected(state);
9559 ret = intel_atomic_check(&dev_priv->drm, state);
9563 /* Write calculated watermark values back */
9564 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
9565 crtc_state->wm.need_postvbl_update = true;
9566 intel_optimize_watermarks(intel_state, crtc);
9568 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
/* Deadlock: drop the state and locks, back off, and retry. */
9572 if (ret == -EDEADLK) {
9573 drm_atomic_state_clear(state);
9574 drm_modeset_backoff(&ctx);
9579 * If we fail here, it means that the hardware appears to be
9580 * programmed in a way that shouldn't be possible, given our
9581 * understanding of watermark requirements. This might mean a
9582 * mistake in the hardware readout code or a mistake in the
9583 * watermark calculations for a given platform. Raise a WARN
9584 * so that this is noticeable.
9586 * If this actually happens, we'll have to just leave the
9587 * BIOS-programmed watermarks untouched and hope for the best.
9589 drm_WARN(&dev_priv->drm, ret,
9590 "Could not determine valid watermarks for inherited state\n");
9592 drm_atomic_state_put(state);
9594 drm_modeset_drop_locks(&ctx);
9595 drm_modeset_acquire_fini(&ctx);
/*
 * Commit the state read out from hardware at probe time so active planes
 * get consistent software state.  "inherited" is preserved to avoid a
 * full modeset; encoders may veto the fastset via initial_fastset_check.
 * Returns 0 on success or a -errno.
 * NOTE(review): allocation-failure check, retry label for -EDEADLK and
 * some goto targets are elided from this view.
 */
9598 static int intel_initial_commit(struct drm_device *dev)
9600 struct drm_atomic_state *state = NULL;
9601 struct drm_modeset_acquire_ctx ctx;
9602 struct intel_crtc *crtc;
9605 state = drm_atomic_state_alloc(dev);
9609 drm_modeset_acquire_init(&ctx, 0);
9612 state->acquire_ctx = &ctx;
9614 for_each_intel_crtc(dev, crtc) {
9615 struct intel_crtc_state *crtc_state =
9616 intel_atomic_get_crtc_state(state, crtc);
9618 if (IS_ERR(crtc_state)) {
9619 ret = PTR_ERR(crtc_state);
9623 if (crtc_state->hw.active) {
9624 struct intel_encoder *encoder;
9627 * We've not yet detected sink capabilities
9628 * (audio,infoframes,etc.) and thus we don't want to
9629 * force a full state recomputation yet. We want that to
9630 * happen only for the first real commit from userspace.
9631 * So preserve the inherited flag for the time being.
9633 crtc_state->inherited = true;
9635 ret = drm_atomic_add_affected_planes(state, &crtc->base);
9640 * FIXME hack to force a LUT update to avoid the
9641 * plane update forcing the pipe gamma on without
9642 * having a proper LUT loaded. Remove once we
9643 * have readout for pipe gamma enable.
9645 crtc_state->uapi.color_mgmt_changed = true;
9647 for_each_intel_encoder_mask(dev, encoder,
9648 crtc_state->uapi.encoder_mask) {
9649 if (encoder->initial_fastset_check &&
9650 !encoder->initial_fastset_check(encoder, crtc_state)) {
9651 ret = drm_atomic_add_affected_connectors(state,
9660 ret = drm_atomic_commit(state);
9663 if (ret == -EDEADLK) {
9664 drm_atomic_state_clear(state);
9665 drm_modeset_backoff(&ctx);
9669 drm_atomic_state_put(state);
9671 drm_modeset_drop_locks(&ctx);
9672 drm_modeset_acquire_fini(&ctx);
/*
 * Initialize drm_mode_config: limits, vtable, and per-generation maximum
 * framebuffer and cursor dimensions.
 */
9677 static void intel_mode_config_init(struct drm_i915_private *i915)
9679 struct drm_mode_config *mode_config = &i915->drm.mode_config;
9681 drm_mode_config_init(&i915->drm);
9682 INIT_LIST_HEAD(&i915->global_obj_list);
9684 mode_config->min_width = 0;
9685 mode_config->min_height = 0;
9687 mode_config->preferred_depth = 24;
9688 mode_config->prefer_shadow = 1;
9690 mode_config->funcs = &intel_mode_funcs;
9692 mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
9695 * Maximum framebuffer dimensions, chosen to match
9696 * the maximum render engine surface size on gen4+.
9698 if (DISPLAY_VER(i915) >= 7) {
9699 mode_config->max_width = 16384;
9700 mode_config->max_height = 16384;
9701 } else if (DISPLAY_VER(i915) >= 4) {
9702 mode_config->max_width = 8192;
9703 mode_config->max_height = 8192;
9704 } else if (DISPLAY_VER(i915) == 3) {
9705 mode_config->max_width = 4096;
9706 mode_config->max_height = 4096;
9708 mode_config->max_width = 2048;
9709 mode_config->max_height = 2048;
/* Cursor size limits vary by platform generation. */
9712 if (IS_I845G(i915) || IS_I865G(i915)) {
9713 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
9714 mode_config->cursor_height = 1023;
9715 } else if (IS_I830(i915) || IS_I85X(i915) ||
9716 IS_I915G(i915) || IS_I915GM(i915)) {
9717 mode_config->cursor_width = 64;
9718 mode_config->cursor_height = 64;
9720 mode_config->cursor_width = 256;
9721 mode_config->cursor_height = 256;
/* Counterpart of intel_mode_config_init(): tear down global objects
 * and the DRM mode config. */
9725 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
9727 intel_atomic_global_obj_cleanup(i915);
9728 drm_mode_config_cleanup(&i915->drm);
9731 /* part #1: call before irq install */
/*
 * First stage of display init (pre-IRQ): vblank, VBT, VGA, power
 * domains, DMC firmware, workqueues, mode config, cdclk/dbuf/bw state.
 * Returns 0 or a -errno; errors unwind via the labels at the bottom.
 * NOTE(review): several "if (ret)" checks and return statements are
 * elided from this view.
 */
9732 int intel_modeset_init_noirq(struct drm_i915_private *i915)
9736 if (i915_inject_probe_failure(i915))
9739 if (HAS_DISPLAY(i915)) {
9740 ret = drm_vblank_init(&i915->drm,
9741 INTEL_NUM_PIPES(i915));
9746 intel_bios_init(i915);
9748 ret = intel_vga_register(i915);
9752 /* FIXME: completely on the wrong abstraction layer */
9753 intel_power_domains_init_hw(i915, false);
9755 if (!HAS_DISPLAY(i915))
9758 intel_dmc_ucode_init(i915);
9760 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
9761 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
9762 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
9764 i915->framestart_delay = 1; /* 1-4 */
9766 i915->window2_delay = 0; /* No DSB so no window2 delay */
9768 intel_mode_config_init(i915);
9770 ret = intel_cdclk_init(i915);
9772 goto cleanup_vga_client_pw_domain_dmc;
9774 ret = intel_dbuf_init(i915);
9776 goto cleanup_vga_client_pw_domain_dmc;
9778 ret = intel_bw_init(i915);
9780 goto cleanup_vga_client_pw_domain_dmc;
9782 init_llist_head(&i915->atomic_helper.free_list);
9783 INIT_WORK(&i915->atomic_helper.free_work,
9784 intel_atomic_helper_free_state_worker);
9786 intel_init_quirks(i915);
9788 intel_fbc_init(i915);
/* Error unwind labels (reverse order of setup). */
9792 cleanup_vga_client_pw_domain_dmc:
9793 intel_dmc_ucode_fini(i915);
9794 intel_power_domains_driver_remove(i915);
9795 intel_vga_unregister(i915);
9797 intel_bios_driver_remove(i915);
9802 /* part #2: call after irq install, but before gem init */
/*
 * Second stage of display init: PM/panel/PPS/GMBUS setup, CRTC creation,
 * DPLLs, hardware state readout, output setup and initial plane config,
 * followed by watermark sanitization on non-GMCH platforms.
 */
9803 int intel_modeset_init_nogem(struct drm_i915_private *i915)
9805 struct drm_device *dev = &i915->drm;
9807 struct intel_crtc *crtc;
9810 if (!HAS_DISPLAY(i915))
9813 intel_init_pm(i915);
9815 intel_panel_sanitize_ssc(i915);
9817 intel_pps_setup(i915);
9819 intel_gmbus_setup(i915);
9821 drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
9822 INTEL_NUM_PIPES(i915),
9823 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
9825 for_each_pipe(i915, pipe) {
9826 ret = intel_crtc_init(i915, pipe);
/* A failed CRTC init tears down the whole mode config. */
9828 intel_mode_config_cleanup(i915);
9833 intel_plane_possible_crtcs_init(i915);
9834 intel_shared_dpll_init(dev);
9835 intel_fdi_pll_freq_update(i915);
9837 intel_update_czclk(i915);
9838 intel_modeset_init_hw(i915);
9839 intel_dpll_update_ref_clks(i915);
9841 intel_hdcp_component_init(i915);
9843 if (i915->max_cdclk_freq == 0)
9844 intel_update_max_cdclk(i915);
9847 * If the platform has HTI, we need to find out whether it has reserved
9848 * any display resources before we create our display outputs.
9850 if (INTEL_INFO(i915)->display.has_hti)
9851 i915->hti_state = intel_de_read(i915, HDPORT_STATE);
9853 /* Just disable it once at startup */
9854 intel_vga_disable(i915);
9855 intel_setup_outputs(i915);
9857 drm_modeset_lock_all(dev);
9858 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
9859 intel_acpi_assign_connector_fwnodes(i915);
9860 drm_modeset_unlock_all(dev);
9862 for_each_intel_crtc(dev, crtc) {
9863 if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
9865 intel_crtc_initial_plane_config(crtc);
9869 * Make sure hardware watermarks really match the state we read out.
9870 * Note that we need to do this after reconstructing the BIOS fb's
9871 * since the watermark calculation done here will use pstate->fb.
9873 if (!HAS_GMCH(i915))
9874 sanitize_watermarks(i915);
9879 /* part #3: call after gem init */
/*
 * Final stage of display init: initial commit, overlay, fbdev, hotplug
 * and IPC.  The initial-commit failure is logged but not fatal.
 */
9880 int intel_modeset_init(struct drm_i915_private *i915)
9884 if (!HAS_DISPLAY(i915))
9888 * Force all active planes to recompute their states. So that on
9889 * mode_setcrtc after probe, all the intel_plane_state variables
9890 * are already calculated and there is no assert_plane warnings
9893 ret = intel_initial_commit(&i915->drm);
9895 drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
9897 intel_overlay_setup(i915);
9899 ret = intel_fbdev_init(&i915->drm);
9903 /* Only enable hotplug handling once the fbdev is fully set up. */
9904 intel_hpd_init(i915);
9905 intel_hpd_poll_disable(i915);
9907 intel_init_ipc(i915);
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 timing (quirk:
 * i830 requires both pipes/planes enabled).  Programs the DPLL, the
 * full set of pipe timing registers, then PIPECONF, and waits for the
 * scanline to start moving.
 * NOTE(review): the dpll/fp/i variable declarations and parts of the
 * dpll value expression are elided from this view.
 */
9912 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9914 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9915 /* 640x480@60Hz, ~25175 kHz */
9916 struct dpll clock = {
/* Sanity-check the fixed divider values against the expected dotclock. */
9926 drm_WARN_ON(&dev_priv->drm,
9927 i9xx_calc_dpll_params(48000, &clock) != 25154);
9929 drm_dbg_kms(&dev_priv->drm,
9930 "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
9931 pipe_name(pipe), clock.vco, clock.dot);
9933 fp = i9xx_dpll_compute_fp(&clock);
9934 dpll = DPLL_DVO_2X_MODE |
9936 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
9937 PLL_P2_DIVIDE_BY_4 |
9938 PLL_REF_INPUT_DREFCLK |
9941 intel_de_write(dev_priv, FP0(pipe), fp);
9942 intel_de_write(dev_priv, FP1(pipe), fp);
/* Hardcoded 640x480 CVT-ish timings. */
9944 intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
9945 intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
9946 intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
9947 intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
9948 intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
9949 intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
9950 intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
9953 * Apparently we need to have VGA mode enabled prior to changing
9954 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
9955 * dividers, even though the register value does change.
9957 intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
9958 intel_de_write(dev_priv, DPLL(pipe), dpll);
9960 /* Wait for the clocks to stabilize. */
9961 intel_de_posting_read(dev_priv, DPLL(pipe));
9964 /* The pixel multiplier can only be updated once the
9965 * DPLL is enabled and the clocks are stable.
9967 * So write it again.
9969 intel_de_write(dev_priv, DPLL(pipe), dpll);
9971 /* We do this three times for luck */
9972 for (i = 0; i < 3 ; i++) {
9973 intel_de_write(dev_priv, DPLL(pipe), dpll);
9974 intel_de_posting_read(dev_priv, DPLL(pipe));
9975 udelay(150); /* wait for warmup */
9978 intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
9979 intel_de_posting_read(dev_priv, PIPECONF(pipe));
9981 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * Counterpart of i830_enable_pipe(): warn if any plane or cursor is
 * still enabled, then disable the pipe and its DPLL and wait for the
 * scanline to stop.
 */
9984 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
9986 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
9988 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
9991 drm_WARN_ON(&dev_priv->drm,
9992 intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
9993 drm_WARN_ON(&dev_priv->drm,
9994 intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
9995 drm_WARN_ON(&dev_priv->drm,
9996 intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
9997 drm_WARN_ON(&dev_priv->drm,
9998 intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
9999 drm_WARN_ON(&dev_priv->drm,
10000 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
10002 intel_de_write(dev_priv, PIPECONF(pipe), 0);
10003 intel_de_posting_read(dev_priv, PIPECONF(pipe));
10005 intel_wait_for_pipe_scanline_stopped(crtc);
/* Leave the DPLL disabled but keep VGA mode disable set. */
10007 intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
10008 intel_de_posting_read(dev_priv, DPLL(pipe));
/*
 * On gen2/3 the BIOS may have attached a primary plane to the wrong pipe
 * (plane<->pipe mapping is configurable there). Detect such planes via
 * their hw state and turn them off so our plane<->pipe assumption holds.
 * Gen4+ has a fixed mapping, so there is nothing to sanitize.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (DISPLAY_VER(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		/* Plane not enabled at all -> nothing to fix. */
		if (!plane->get_hw_state(plane, &pipe))
			continue;

		/* Plane already on its expected pipe -> ok. */
		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		/* Disable it on the pipe it is actually attached to. */
		plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
10040 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
10042 struct drm_device *dev = crtc->base.dev;
10043 struct intel_encoder *encoder;
10045 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
10051 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
10053 struct drm_device *dev = encoder->base.dev;
10054 struct intel_connector *connector;
10056 for_each_connector_on_encoder(dev, &encoder->base, connector)
10062 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
10063 enum pipe pch_transcoder)
10065 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
10066 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
/*
 * Override whatever frame start delay the BIOS programmed with the
 * driver's own value (dev_priv->framestart_delay), on the CPU transcoder
 * and, if a PCH encoder is in use, on the PCH transcoder as well.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+ keep the delay in the per-transcoder chicken register. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no such register; nothing to do. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms keep the delay in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: delay lives in the PCH transcoder config register. */
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT and later: delay lives in TRANS_CHICKEN2. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
/*
 * Bring a single CRTC's BIOS-inherited state in line with what the driver
 * expects: clear debug frame start delays, kill all non-primary planes,
 * disable the pipe entirely if nothing drives it, and initialize the FIFO
 * underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/* Disable any background color/etc. set by the BIOS */
		intel_color_commit(crtc_state);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
10182 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
10184 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10187 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
10188 * the hardware when a high res displays plugged in. DPLL P
10189 * divider is zero, and the pipe timings are bonkers. We'll
10190 * try to disable everything in that case.
10192 * FIXME would be nice to be able to sanitize this state
10193 * without several WARNs, but for now let's take the easy
10196 return IS_SANDYBRIDGE(dev_priv) &&
10197 crtc_state->hw.active &&
10198 crtc_state->shared_dpll &&
10199 crtc_state->port_clock == 0;
/*
 * Fix up an encoder whose readout state is inconsistent: an encoder that
 * claims active connectors but has no active pipe (e.g. after resume
 * register restore, or a bogus SNB BIOS DPLL config) is manually disabled
 * and its connector links are clamped to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a misprogrammed-DPLL pipe as inactive so it gets torn down. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
/*
 * Read each plane's hw enable/pipe state, record its visibility in the
 * owning crtc's state, then rebuild the per-crtc plane bitmasks.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* PIPE_A fallback in case get_hw_state() leaves pipe untouched. */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	/* Now that visibility is known, recompute the crtc plane bitmasks. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}
/*
 * Read the full current hardware state into the atomic state objects:
 * crtcs first, then planes, encoders, DPLLs and connectors, and finally
 * the derived per-crtc cdclk/bandwidth bookkeeping. Ordering matters —
 * later readouts rely on the earlier ones (e.g. encoder readout needs
 * crtc state, connector masks need encoder->crtc links).
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Start from a clean state before reading back from hw. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		struct intel_crtc_state *crtc_state = NULL;

		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = intel_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* encoder should read be linked to bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		/* crtc_state may be NULL here for inactive encoders. */
		if (encoder->sync_state)
			encoder->sync_state(encoder, crtc_state);

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
10493 get_encoder_power_domains(struct drm_i915_private *dev_priv)
10495 struct intel_encoder *encoder;
10497 for_each_intel_encoder(&dev_priv->drm, encoder) {
10498 struct intel_crtc_state *crtc_state;
10500 if (!encoder->get_power_domains)
10504 * MST-primary and inactive encoders don't have a crtc state
10505 * and neither of these require any power domain references.
10507 if (!encoder->base.crtc)
10510 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10511 encoder->get_power_domains(encoder, crtc_state);
/*
 * Apply display workarounds that must be in place before the hw state is
 * sanitized (in particular before any planes are disabled).
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
10544 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
10545 enum port port, i915_reg_t hdmi_reg)
10547 u32 val = intel_de_read(dev_priv, hdmi_reg);
10549 if (val & SDVO_ENABLE ||
10550 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
10553 drm_dbg_kms(&dev_priv->drm,
10554 "Sanitizing transcoder select for HDMI %c\n",
10557 val &= ~SDVO_PIPE_SEL_MASK;
10558 val |= SDVO_PIPE_SEL(PIPE_A);
10560 intel_de_write(dev_priv, hdmi_reg, val);
10563 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
10564 enum port port, i915_reg_t dp_reg)
10566 u32 val = intel_de_read(dev_priv, dp_reg);
10568 if (val & DP_PORT_EN ||
10569 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
10572 drm_dbg_kms(&dev_priv->drm,
10573 "Sanitizing transcoder select for DP %c\n",
10576 val &= ~DP_PIPE_SEL_MASK;
10577 val |= DP_PIPE_SEL(PIPE_A);
10579 intel_de_write(dev_priv, dp_reg, val);
/* Sanitize transcoder select on every IBX PCH DP and HDMI port. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Hold the INIT power domain for the whole readout + sanitize pass. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and sanitize where needed) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/* Sanitized crtcs should need no extra power domains by now. */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
/*
 * Restore the display state saved at suspend (modeset_restore_state),
 * retrying the modeset lock acquisition on deadlock as the drm locking
 * protocol requires.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard drm_modeset_lock deadlock/backoff dance. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
/* Cancel all outstanding hpd-triggered work (modeset retries, HDCP). */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Drain the commit workqueues while interrupts are still alive. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues must go only after all users above are gone. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup(i915);
}
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
/*
 * Decide whether driver probe must be deferred for this device:
 * either vga_switcheroo (apple-gmux) or a not-yet-registered privacy
 * screen provider requires waiting.
 */
bool intel_modeset_probe_defer(struct pci_dev *pdev)
{
	struct drm_privacy_screen *privacy_screen;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return true;

	/* If the LCD panel has a privacy-screen, wait for it */
	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
		return true;

	drm_privacy_screen_put(privacy_screen);

	return false;
}
/*
 * Register userspace-visible display interfaces (debugfs, opregion,
 * audio, fbdev, connector polling) once output probing is complete.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45. Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
10860 void intel_display_driver_unregister(struct drm_i915_private *i915)
10862 if (!HAS_DISPLAY(i915))
10865 intel_fbdev_unregister(i915);
10866 intel_audio_deinit(i915);
10869 * After flushing the fbdev (incl. a late async config which
10870 * will have delayed queuing of a hotplug event), then flush
10871 * the hotplug events.
10873 drm_kms_helper_poll_fini(&i915->drm);
10874 drm_atomic_helper_shutdown(&i915->drm);
10876 acpi_video_unregister();
10877 intel_opregion_unregister(i915);