drm/i915: Nuke intel_dp_set_m_n()
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 #include <linux/vga_switcheroo.h>
36
37 #include <drm/drm_atomic.h>
38 #include <drm/drm_atomic_helper.h>
39 #include <drm/drm_atomic_uapi.h>
40 #include <drm/drm_damage_helper.h>
41 #include <drm/drm_dp_helper.h>
42 #include <drm/drm_edid.h>
43 #include <drm/drm_fourcc.h>
44 #include <drm/drm_plane_helper.h>
45 #include <drm/drm_privacy_screen_consumer.h>
46 #include <drm/drm_probe_helper.h>
47 #include <drm/drm_rect.h>
48
49 #include "display/intel_audio.h"
50 #include "display/intel_crt.h"
51 #include "display/intel_ddi.h"
52 #include "display/intel_display_debugfs.h"
53 #include "display/intel_dp.h"
54 #include "display/intel_dp_mst.h"
55 #include "display/intel_dpll.h"
56 #include "display/intel_dpll_mgr.h"
57 #include "display/intel_drrs.h"
58 #include "display/intel_dsi.h"
59 #include "display/intel_dvo.h"
60 #include "display/intel_fb.h"
61 #include "display/intel_gmbus.h"
62 #include "display/intel_hdmi.h"
63 #include "display/intel_lvds.h"
64 #include "display/intel_sdvo.h"
65 #include "display/intel_snps_phy.h"
66 #include "display/intel_tv.h"
67 #include "display/intel_vdsc.h"
68 #include "display/intel_vrr.h"
69
70 #include "gem/i915_gem_lmem.h"
71 #include "gem/i915_gem_object.h"
72
73 #include "gt/gen8_ppgtt.h"
74
75 #include "g4x_dp.h"
76 #include "g4x_hdmi.h"
77 #include "i915_drv.h"
78 #include "icl_dsi.h"
79 #include "intel_acpi.h"
80 #include "intel_atomic.h"
81 #include "intel_atomic_plane.h"
82 #include "intel_bw.h"
83 #include "intel_cdclk.h"
84 #include "intel_color.h"
85 #include "intel_crtc.h"
86 #include "intel_de.h"
87 #include "intel_display_types.h"
88 #include "intel_dmc.h"
89 #include "intel_dp_link_training.h"
90 #include "intel_dpt.h"
91 #include "intel_fbc.h"
92 #include "intel_fbdev.h"
93 #include "intel_fdi.h"
94 #include "intel_fifo_underrun.h"
95 #include "intel_frontbuffer.h"
96 #include "intel_hdcp.h"
97 #include "intel_hotplug.h"
98 #include "intel_overlay.h"
99 #include "intel_panel.h"
100 #include "intel_pch_display.h"
101 #include "intel_pch_refclk.h"
102 #include "intel_pcode.h"
103 #include "intel_pipe_crc.h"
104 #include "intel_plane_initial.h"
105 #include "intel_pm.h"
106 #include "intel_pps.h"
107 #include "intel_psr.h"
108 #include "intel_quirks.h"
109 #include "intel_sprite.h"
110 #include "intel_tc.h"
111 #include "intel_vga.h"
112 #include "i9xx_plane.h"
113 #include "skl_scaler.h"
114 #include "skl_universal_plane.h"
115 #include "vlv_dsi_pll.h"
116 #include "vlv_sideband.h"
117 #include "vlv_dsi.h"
118
119 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
120 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
121 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
122                                          const struct intel_link_m_n *m_n);
123 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
124 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
125 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
126 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
127 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
128 static void intel_modeset_setup_hw_state(struct drm_device *dev,
129                                          struct drm_modeset_acquire_ctx *ctx);
130
131 /**
132  * intel_update_watermarks - update FIFO watermark values based on current modes
133  * @dev_priv: i915 device
134  *
135  * Calculate watermark values for the various WM regs based on current mode
136  * and plane configuration.
137  *
138  * There are several cases to deal with here:
139  *   - normal (i.e. non-self-refresh)
140  *   - self-refresh (SR) mode
141  *   - lines are large relative to FIFO size (buffer can hold up to 2)
142  *   - lines are small relative to FIFO size (buffer can hold more than 2
143  *     lines), so need to account for TLB latency
144  *
145  *   The normal calculation is:
146  *     watermark = dotclock * bytes per pixel * latency
147  *   where latency is platform & configuration dependent (we assume pessimal
148  *   values here).
149  *
150  *   The SR calculation is:
151  *     watermark = (trunc(latency/line time)+1) * surface width *
152  *       bytes per pixel
153  *   where
154  *     line time = htotal / dotclock
155  *     surface width = hdisplay for normal plane and 64 for cursor
156  *   and latency is assumed to be high, as above.
157  *
158  * The final value programmed to the register should always be rounded up,
159  * and include an extra 2 entries to account for clock crossings.
160  *
161  * We don't use the sprite, so we can ignore that.  And on Crestline we have
162  * to set the non-SR watermarks to 8.
163  */
164 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
165 {
166         if (dev_priv->wm_disp->update_wm)
167                 dev_priv->wm_disp->update_wm(dev_priv);
168 }
169
170 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
171                                  struct intel_crtc *crtc)
172 {
173         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
174         if (dev_priv->wm_disp->compute_pipe_wm)
175                 return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
176         return 0;
177 }
178
179 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
180                                          struct intel_crtc *crtc)
181 {
182         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
183         if (!dev_priv->wm_disp->compute_intermediate_wm)
184                 return 0;
185         if (drm_WARN_ON(&dev_priv->drm,
186                         !dev_priv->wm_disp->compute_pipe_wm))
187                 return 0;
188         return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
189 }
190
191 static bool intel_initial_watermarks(struct intel_atomic_state *state,
192                                      struct intel_crtc *crtc)
193 {
194         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
195         if (dev_priv->wm_disp->initial_watermarks) {
196                 dev_priv->wm_disp->initial_watermarks(state, crtc);
197                 return true;
198         }
199         return false;
200 }
201
202 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
203                                            struct intel_crtc *crtc)
204 {
205         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
206         if (dev_priv->wm_disp->atomic_update_watermarks)
207                 dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
208 }
209
210 static void intel_optimize_watermarks(struct intel_atomic_state *state,
211                                       struct intel_crtc *crtc)
212 {
213         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
214         if (dev_priv->wm_disp->optimize_watermarks)
215                 dev_priv->wm_disp->optimize_watermarks(state, crtc);
216 }
217
218 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
219 {
220         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
221         if (dev_priv->wm_disp->compute_global_watermarks)
222                 return dev_priv->wm_disp->compute_global_watermarks(state);
223         return 0;
224 }
225
226 /* returns HPLL frequency in kHz */
227 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
228 {
229         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
230
231         /* Obtain SKU information */
232         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
233                 CCK_FUSE_HPLL_FREQ_MASK;
234
235         return vco_freq[hpll_freq] * 1000;
236 }
237
238 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
239                       const char *name, u32 reg, int ref_freq)
240 {
241         u32 val;
242         int divider;
243
244         val = vlv_cck_read(dev_priv, reg);
245         divider = val & CCK_FREQUENCY_VALUES;
246
247         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
248                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
249                  "%s change in progress\n", name);
250
251         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
252 }
253
254 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
255                            const char *name, u32 reg)
256 {
257         int hpll;
258
259         vlv_cck_get(dev_priv);
260
261         if (dev_priv->hpll_freq == 0)
262                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
263
264         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
265
266         vlv_cck_put(dev_priv);
267
268         return hpll;
269 }
270
271 static void intel_update_czclk(struct drm_i915_private *dev_priv)
272 {
273         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
274                 return;
275
276         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
277                                                       CCK_CZ_CLOCK_CONTROL);
278
279         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
280                 dev_priv->czclk_freq);
281 }
282
283 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
284 {
285         return (crtc_state->active_planes &
286                 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
287 }
288
289 /* WA Display #0827: Gen9:all */
290 static void
291 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
292 {
293         if (enable)
294                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
295                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
296         else
297                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
298                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
299 }
300
301 /* Wa_2006604312:icl,ehl */
302 static void
303 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
304                        bool enable)
305 {
306         if (enable)
307                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
308                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
309         else
310                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
311                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
312 }
313
314 /* Wa_1604331009:icl,jsl,ehl */
315 static void
316 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
317                        bool enable)
318 {
319         intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
320                      enable ? CURSOR_GATING_DIS : 0);
321 }
322
/* A port sync slave transcoder has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
328
/* A port sync master transcoder has at least one slave in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
334
/* True if the crtc participates in transcoder port sync, as master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
341
342 static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
343 {
344         if (crtc_state->bigjoiner_slave)
345                 return crtc_state->bigjoiner_linked_crtc;
346         else
347                 return to_intel_crtc(crtc_state->uapi.crtc);
348 }
349
350 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
351                                     enum pipe pipe)
352 {
353         i915_reg_t reg = PIPEDSL(pipe);
354         u32 line1, line2;
355
356         line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
357         msleep(5);
358         line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
359
360         return line1 != line2;
361 }
362
/*
 * Wait (up to 100ms) for the pipe scanline to either start moving
 * (@state == true) or stop moving (@state == false); log on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
374
/* Wait for the pipe scanline to stop advancing (pipe fully off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
379
/* Wait for the pipe scanline to start advancing (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
384
385 static void
386 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
387 {
388         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
389         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
390
391         if (DISPLAY_VER(dev_priv) >= 4) {
392                 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
393
394                 /* Wait for the Pipe State to go off */
395                 if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
396                                             PIPECONF_STATE_ENABLE, 100))
397                         drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
398         } else {
399                 intel_wait_for_pipe_scanline_stopped(crtc);
400         }
401 }
402
403 void assert_transcoder(struct drm_i915_private *dev_priv,
404                        enum transcoder cpu_transcoder, bool state)
405 {
406         bool cur_state;
407         enum intel_display_power_domain power_domain;
408         intel_wakeref_t wakeref;
409
410         /* we keep both pipes enabled on 830 */
411         if (IS_I830(dev_priv))
412                 state = true;
413
414         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
415         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
416         if (wakeref) {
417                 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
418                 cur_state = !!(val & PIPECONF_ENABLE);
419
420                 intel_display_power_put(dev_priv, power_domain, wakeref);
421         } else {
422                 cur_state = false;
423         }
424
425         I915_STATE_WARN(cur_state != state,
426                         "transcoder %s assertion failure (expected %s, current %s)\n",
427                         transcoder_name(cpu_transcoder),
428                         onoff(state), onoff(cur_state));
429 }
430
/*
 * Verify that @plane's hardware enable state matches @state, warning
 * on mismatch. The pipe reported by get_hw_state() is not used here.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}
442
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
454
/*
 * Wait (up to 1s) for the VLV/CHV PHY to report the port lanes ready.
 *
 * @expected_mask is given in PORT_B bit positions; it is shifted for
 * PORT_C, which shares DPLL(0) status with PORT_B, while PORT_D status
 * lives in DPIO_PHY_STATUS. Warns on timeout with the observed bits.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* PORT_C ready bits sit 4 positions above PORT_B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
488
489 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
490 {
491         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
492
493         if (HAS_PCH_LPT(dev_priv))
494                 return PIPE_A;
495         else
496                 return crtc->pipe;
497 }
498
/*
 * Enable the CPU transcoder/pipe for @new_crtc_state.
 *
 * Asserts the preconditions (planes disabled, required PLLs/FDI running),
 * sets PIPECONF_ENABLE, and when frame counting relies on timestamps waits
 * for the scanline to actually start moving.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
559
/*
 * Disable the CPU transcoder/pipe for @old_crtc_state.
 *
 * Requires all planes to be disabled first. Clears PIPECONF_ENABLE
 * (except on 830, which keeps both pipes running) and waits for the
 * pipe to actually shut down.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Already disabled: nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait for shutdown if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
601
602 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
603 {
604         unsigned int size = 0;
605         int i;
606
607         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
608                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
609
610         return size;
611 }
612
613 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
614 {
615         unsigned int size = 0;
616         int i;
617
618         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
619                 unsigned int plane_size;
620
621                 if (rem_info->plane[i].linear)
622                         plane_size = rem_info->plane[i].size;
623                 else
624                         plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
625
626                 if (plane_size == 0)
627                         continue;
628
629                 if (rem_info->plane_alignment)
630                         size = ALIGN(size, rem_info->plane_alignment);
631
632                 size += plane_size;
633         }
634
635         return size;
636 }
637
638 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
639 {
640         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
641         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
642
643         return DISPLAY_VER(dev_priv) < 4 ||
644                 (plane->fbc &&
645                  plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
646 }
647
648 /*
649  * Convert the x/y offsets into a linear offset.
650  * Only valid with 0/180 degree rotation, which is fine since linear
651  * offset is only used with linear buffers on pre-hsw and tiled buffers
652  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
653  */
654 u32 intel_fb_xy_to_linear(int x, int y,
655                           const struct intel_plane_state *state,
656                           int color_plane)
657 {
658         const struct drm_framebuffer *fb = state->hw.fb;
659         unsigned int cpp = fb->format->cpp[color_plane];
660         unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
661
662         return y * pitch + x * cpp;
663 }
664
/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)

{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}
678
679 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
680                               u32 pixel_format, u64 modifier)
681 {
682         struct intel_crtc *crtc;
683         struct intel_plane *plane;
684
685         if (!HAS_DISPLAY(dev_priv))
686                 return 0;
687
688         /*
689          * We assume the primary plane for pipe A has
690          * the highest stride limits of them all,
691          * if in case pipe A is disabled, use the first pipe from pipe_mask.
692          */
693         crtc = intel_first_crtc(dev_priv);
694         if (!crtc)
695                 return 0;
696
697         plane = to_intel_plane(crtc->base.primary);
698
699         return plane->max_stride(plane, pixel_format, modifier,
700                                  DRM_MODE_ROTATE_0);
701 }
702
703 static void
704 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
705                         struct intel_plane_state *plane_state,
706                         bool visible)
707 {
708         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
709
710         plane_state->uapi.visible = visible;
711
712         if (visible)
713                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
714         else
715                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
716 }
717
718 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
719 {
720         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
721         struct drm_plane *plane;
722
723         /*
724          * Active_planes aliases if multiple "primary" or cursor planes
725          * have been used on the same (or wrong) pipe. plane_mask uses
726          * unique ids, hence we can use that to reconstruct active_planes.
727          */
728         crtc_state->enabled_planes = 0;
729         crtc_state->active_planes = 0;
730
731         drm_for_each_plane_mask(plane, &dev_priv->drm,
732                                 crtc_state->uapi.plane_mask) {
733                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
734                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
735         }
736 }
737
/*
 * Disable @plane outside of the atomic commit machinery (e.g. during HW
 * state sanitization), keeping the cached crtc/plane state consistent.
 * The ordering of cxsr disable, underrun-report disable, plane disarm
 * and vblank waits below is deliberate — see the comments inline.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is tied to the primary plane on HSW/BDW. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}
783
784 unsigned int
785 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
786 {
787         int x = 0, y = 0;
788
789         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
790                                           plane_state->view.color_plane[0].offset, 0);
791
792         return y;
793 }
794
/*
 * Restore display state after a reset/resume: re-read the HW state,
 * re-enable VGA if needed, and (when @state is non-NULL) commit the
 * previously duplicated atomic state with modesets forced.
 *
 * Returns 0 on success or a negative error code from the commit;
 * -EDEADLK should never escape here (warned on).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
833
834 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
835 {
836         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
837                 intel_has_gpu_reset(to_gt(dev_priv)));
838 }
839
/*
 * Quiesce the display before a GPU reset: signal any in-flight modeset
 * that a reset is pending, take all modeset locks, duplicate the current
 * atomic state for later restoration and disable all crtcs. Only does
 * anything when the reset clobbers the display (or the
 * force_reset_modeset_test module parameter asks for it).
 *
 * NOTE: the mode_config mutex and the acquire ctx locks taken here are
 * intentionally held across the reset and released in
 * intel_display_finish_reset().
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	/* Make the flag update visible before waking any waiters. */
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Keep retrying on deadlock until every modeset lock is held. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_display_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
902
/*
 * Counterpart to intel_display_prepare_reset(): restore the display
 * state that was saved before the GPU reset, re-initialize the display
 * hardware if the reset clobbered it, and drop the modeset locks taken
 * in the prepare step.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
		return;

	/* Claim the saved state; NULL means prepare bailed out early. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Release the locks held since intel_display_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
}
954
/*
 * Program the per-pipe PIPE_CHICKEN workaround bits for icl and later:
 * Display WA #1153, Display WA #1605353570, the display 13+ underrun
 * recovery disable (with inverted polarity on DG2), and Wa_14010547955.
 */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
993
994 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
995 {
996         struct drm_crtc *crtc;
997         bool cleanup_done;
998
999         drm_for_each_crtc(crtc, &dev_priv->drm) {
1000                 struct drm_crtc_commit *commit;
1001                 spin_lock(&crtc->commit_lock);
1002                 commit = list_first_entry_or_null(&crtc->commit_list,
1003                                                   struct drm_crtc_commit, commit_entry);
1004                 cleanup_done = commit ?
1005                         try_wait_for_completion(&commit->cleanup_done) : true;
1006                 spin_unlock(&crtc->commit_lock);
1007
1008                 if (cleanup_done)
1009                         continue;
1010
1011                 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
1012
1013                 return true;
1014         }
1015
1016         return false;
1017 }
1018
1019 /*
1020  * Finds the encoder associated with the given CRTC. This can only be
1021  * used when we know that the CRTC isn't feeding multiple encoders!
1022  */
1023 struct intel_encoder *
1024 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1025                            const struct intel_crtc_state *crtc_state)
1026 {
1027         const struct drm_connector_state *connector_state;
1028         const struct drm_connector *connector;
1029         struct intel_encoder *encoder = NULL;
1030         struct intel_crtc *master_crtc;
1031         int num_encoders = 0;
1032         int i;
1033
1034         master_crtc = intel_master_crtc(crtc_state);
1035
1036         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1037                 if (connector_state->crtc != &master_crtc->base)
1038                         continue;
1039
1040                 encoder = to_intel_encoder(connector_state->best_encoder);
1041                 num_encoders++;
1042         }
1043
1044         drm_WARN(encoder->base.dev, num_encoders != 1,
1045                  "%d encoders for pipe %c\n",
1046                  num_encoders, pipe_name(master_crtc->pipe));
1047
1048         return encoder;
1049 }
1050
/*
 * Sanity check after a mode set on CPT platforms: PIPEDSL reports the
 * pipe's current scanline, so if its value never changes the pipe isn't
 * actually running. Gives the counter two 5ms chances to advance before
 * declaring the pipe stuck.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		/* Retry once before complaining. */
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
1066
/*
 * Enable the PCH panel fitter for this pipe and program its destination
 * window from the precomputed pch_pfit state. No-op when the crtc state
 * doesn't use the fitter.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	/* x/width in the high 16 bits, y/height in the low 16 bits. */
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
1094
/*
 * Enable IPS for the given crtc state. On BDW the enable request must go
 * through the pcode mailbox; on HSW IPS_CTL is written directly and we
 * wait for the hardware to acknowledge. Must only be called once a
 * non-cursor plane is enabled and a vblank has passed (see the WARN).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
1131
/*
 * Disable IPS for the given crtc state: BDW goes through the pcode
 * mailbox and waits for confirmation, HSW clears IPS_CTL directly.
 * Always ends with a vblank wait, which must happen before any plane
 * can be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_crtc_wait_for_next_vblank(crtc);
}
1160
1161 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
1162 {
1163         if (crtc->overlay)
1164                 (void) intel_overlay_switch_off(crtc->overlay);
1165
1166         /* Let userspace switch the overlay on again. In most cases userspace
1167          * has to recompute where to put it anyway.
1168          */
1169 }
1170
1171 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
1172                                        const struct intel_crtc_state *new_crtc_state)
1173 {
1174         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1175         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1176
1177         if (!old_crtc_state->ips_enabled)
1178                 return false;
1179
1180         if (intel_crtc_needs_modeset(new_crtc_state))
1181                 return true;
1182
1183         /*
1184          * Workaround : Do not read or write the pipe palette/gamma data while
1185          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1186          *
1187          * Disable IPS before we program the LUT.
1188          */
1189         if (IS_HASWELL(dev_priv) &&
1190             (new_crtc_state->uapi.color_mgmt_changed ||
1191              new_crtc_state->update_pipe) &&
1192             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1193                 return true;
1194
1195         return !new_crtc_state->ips_enabled;
1196 }
1197
1198 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
1199                                        const struct intel_crtc_state *new_crtc_state)
1200 {
1201         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1202         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1203
1204         if (!new_crtc_state->ips_enabled)
1205                 return false;
1206
1207         if (intel_crtc_needs_modeset(new_crtc_state))
1208                 return true;
1209
1210         /*
1211          * Workaround : Do not read or write the pipe palette/gamma data while
1212          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1213          *
1214          * Re-enable IPS after the LUT has been programmed.
1215          */
1216         if (IS_HASWELL(dev_priv) &&
1217             (new_crtc_state->uapi.color_mgmt_changed ||
1218              new_crtc_state->update_pipe) &&
1219             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1220                 return true;
1221
1222         /*
1223          * We can't read out IPS on broadwell, assume the worst and
1224          * forcibly enable IPS on the first fastset.
1225          */
1226         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
1227                 return true;
1228
1229         return !old_crtc_state->ips_enabled;
1230 }
1231
1232 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1233 {
1234         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1235
1236         if (!crtc_state->nv12_planes)
1237                 return false;
1238
1239         /* WA Display #0827: Gen9:all */
1240         if (DISPLAY_VER(dev_priv) == 9)
1241                 return true;
1242
1243         return false;
1244 }
1245
1246 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1247 {
1248         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1249
1250         /* Wa_2006604312:icl,ehl */
1251         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
1252                 return true;
1253
1254         return false;
1255 }
1256
1257 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1258 {
1259         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1260
1261         /* Wa_1604331009:icl,jsl,ehl */
1262         if (is_hdr_mode(crtc_state) &&
1263             crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1264             DISPLAY_VER(dev_priv) == 11)
1265                 return true;
1266
1267         return false;
1268 }
1269
/*
 * Apply (or remove) the plane stretch-max workaround needed while async
 * flips are used with VT-d active (see needs_async_flip_vtd_wa()):
 * limit the stretch to x1 when enabling, restore x8 when disabling.
 */
static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N strech max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}
1288
1289 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1290 {
1291         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1292
1293         return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
1294                 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1295 }
1296
1297 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1298                             const struct intel_crtc_state *new_crtc_state)
1299 {
1300         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1301                 new_crtc_state->active_planes;
1302 }
1303
1304 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1305                              const struct intel_crtc_state *new_crtc_state)
1306 {
1307         return old_crtc_state->active_planes &&
1308                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
1309 }
1310
/*
 * Finish off a plane update: flush frontbuffer tracking, update
 * watermarks, re-enable IPS/FBC, and tear down any workarounds that the
 * old plane configuration needed but the new one no longer does.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);
	intel_drrs_page_flip(state, crtc);

	/* Drop the async flip VT-d workaround if no longer needed. */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	/* Display WA #0827 */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	/* Wa_1604331009:icl,jsl,ehl */
	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

}
1349
1350 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1351                                         struct intel_crtc *crtc)
1352 {
1353         const struct intel_crtc_state *crtc_state =
1354                 intel_atomic_get_new_crtc_state(state, crtc);
1355         u8 update_planes = crtc_state->update_planes;
1356         const struct intel_plane_state *plane_state;
1357         struct intel_plane *plane;
1358         int i;
1359
1360         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1361                 if (plane->enable_flip_done &&
1362                     plane->pipe == crtc->pipe &&
1363                     update_planes & BIT(plane->id) &&
1364                     plane_state->do_async_flip)
1365                         plane->enable_flip_done(plane);
1366         }
1367 }
1368
1369 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1370                                          struct intel_crtc *crtc)
1371 {
1372         const struct intel_crtc_state *crtc_state =
1373                 intel_atomic_get_new_crtc_state(state, crtc);
1374         u8 update_planes = crtc_state->update_planes;
1375         const struct intel_plane_state *plane_state;
1376         struct intel_plane *plane;
1377         int i;
1378
1379         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1380                 if (plane->disable_flip_done &&
1381                     plane->pipe == crtc->pipe &&
1382                     update_planes & BIT(plane->id) &&
1383                     plane_state->do_async_flip)
1384                         plane->disable_flip_done(plane);
1385         }
1386 }
1387
/*
 * WA for planes whose async flip address update enable bit is double
 * buffered and only latched at the start of vblank: re-arm each affected
 * plane with its old state minus the async flip bit, then wait a vblank
 * so the disarm actually latches before the planes are reprogrammed.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}
1418
/*
 * Prepare a crtc for an upcoming plane update: disable PSR/IPS/FBC as
 * needed, arm platform workarounds that the new plane configuration
 * requires, program intermediate watermarks, and perform the vblank
 * waits that must separate these steps from the actual plane update.
 * The ordering of the steps below is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	/* Arm the async flip VT-d workaround when it becomes needed. */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
1522
/*
 * Disable every plane of @crtc that is covered by the new crtc state's
 * update_planes mask, then flush frontbuffer tracking for the planes
 * that were visible before.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
1550
1551 /*
1552  * intel_connector_primary_encoder - get the primary encoder for a connector
1553  * @connector: connector for which to return the encoder
1554  *
1555  * Returns the primary encoder for a connector. There is a 1:1 mapping from
1556  * all connectors to their encoder, except for DP-MST connectors which have
1557  * both a virtual and a primary encoder. These DP-MST primary encoders can be
1558  * pointed to by as many DP-MST connectors as there are pipes.
1559  */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
        struct intel_encoder *encoder;

        /* DP-MST connectors resolve to the primary encoder of their port. */
        if (connector->mst_port)
                return &dp_to_dig_port(connector->mst_port)->base;

        encoder = intel_attached_encoder(connector);
        /* Every non-MST connector is expected to have an attached encoder. */
        drm_WARN_ON(connector->base.dev, !encoder);

        return encoder;
}
1573
/* Run encoder update_prepare() hooks and fix up fastset DPLL state. */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        struct drm_connector_state *new_conn_state;
        struct drm_connector *connector;
        int i;

        /*
         * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
         * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
         */
        if (i915->dpll.mgr) {
                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        /* Full modesets recompute the DPLL state themselves. */
                        if (intel_crtc_needs_modeset(new_crtc_state))
                                continue;

                        new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
                        new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
                }
        }

        /* The update_prepare() hooks are only relevant for modesets. */
        if (!state->modeset)
                return;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state,
                                        i) {
                struct intel_connector *intel_connector;
                struct intel_encoder *encoder;
                struct intel_crtc *crtc;

                if (!intel_connector_needs_modeset(state, connector))
                        continue;

                intel_connector = to_intel_connector(connector);
                encoder = intel_connector_primary_encoder(intel_connector);
                if (!encoder->update_prepare)
                        continue;

                /* crtc may be NULL when the connector is being disabled. */
                crtc = new_conn_state->crtc ?
                        to_intel_crtc(new_conn_state->crtc) : NULL;
                encoder->update_prepare(state, encoder, crtc);
        }
}
1619
1620 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1621 {
1622         struct drm_connector_state *new_conn_state;
1623         struct drm_connector *connector;
1624         int i;
1625
1626         if (!state->modeset)
1627                 return;
1628
1629         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1630                                         i) {
1631                 struct intel_connector *intel_connector;
1632                 struct intel_encoder *encoder;
1633                 struct intel_crtc *crtc;
1634
1635                 if (!intel_connector_needs_modeset(state, connector))
1636                         continue;
1637
1638                 intel_connector = to_intel_connector(connector);
1639                 encoder = intel_connector_primary_encoder(intel_connector);
1640                 if (!encoder->update_complete)
1641                         continue;
1642
1643                 crtc = new_conn_state->crtc ?
1644                         to_intel_crtc(new_conn_state->crtc) : NULL;
1645                 encoder->update_complete(state, encoder, crtc);
1646         }
1647 }
1648
1649 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1650                                           struct intel_crtc *crtc)
1651 {
1652         const struct intel_crtc_state *crtc_state =
1653                 intel_atomic_get_new_crtc_state(state, crtc);
1654         const struct drm_connector_state *conn_state;
1655         struct drm_connector *conn;
1656         int i;
1657
1658         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1659                 struct intel_encoder *encoder =
1660                         to_intel_encoder(conn_state->best_encoder);
1661
1662                 if (conn_state->crtc != &crtc->base)
1663                         continue;
1664
1665                 if (encoder->pre_pll_enable)
1666                         encoder->pre_pll_enable(state, encoder,
1667                                                 crtc_state, conn_state);
1668         }
1669 }
1670
1671 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1672                                       struct intel_crtc *crtc)
1673 {
1674         const struct intel_crtc_state *crtc_state =
1675                 intel_atomic_get_new_crtc_state(state, crtc);
1676         const struct drm_connector_state *conn_state;
1677         struct drm_connector *conn;
1678         int i;
1679
1680         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1681                 struct intel_encoder *encoder =
1682                         to_intel_encoder(conn_state->best_encoder);
1683
1684                 if (conn_state->crtc != &crtc->base)
1685                         continue;
1686
1687                 if (encoder->pre_enable)
1688                         encoder->pre_enable(state, encoder,
1689                                             crtc_state, conn_state);
1690         }
1691 }
1692
1693 static void intel_encoders_enable(struct intel_atomic_state *state,
1694                                   struct intel_crtc *crtc)
1695 {
1696         const struct intel_crtc_state *crtc_state =
1697                 intel_atomic_get_new_crtc_state(state, crtc);
1698         const struct drm_connector_state *conn_state;
1699         struct drm_connector *conn;
1700         int i;
1701
1702         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1703                 struct intel_encoder *encoder =
1704                         to_intel_encoder(conn_state->best_encoder);
1705
1706                 if (conn_state->crtc != &crtc->base)
1707                         continue;
1708
1709                 if (encoder->enable)
1710                         encoder->enable(state, encoder,
1711                                         crtc_state, conn_state);
1712                 intel_opregion_notify_encoder(encoder, true);
1713         }
1714 }
1715
1716 static void intel_encoders_disable(struct intel_atomic_state *state,
1717                                    struct intel_crtc *crtc)
1718 {
1719         const struct intel_crtc_state *old_crtc_state =
1720                 intel_atomic_get_old_crtc_state(state, crtc);
1721         const struct drm_connector_state *old_conn_state;
1722         struct drm_connector *conn;
1723         int i;
1724
1725         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1726                 struct intel_encoder *encoder =
1727                         to_intel_encoder(old_conn_state->best_encoder);
1728
1729                 if (old_conn_state->crtc != &crtc->base)
1730                         continue;
1731
1732                 intel_opregion_notify_encoder(encoder, false);
1733                 if (encoder->disable)
1734                         encoder->disable(state, encoder,
1735                                          old_crtc_state, old_conn_state);
1736         }
1737 }
1738
1739 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1740                                         struct intel_crtc *crtc)
1741 {
1742         const struct intel_crtc_state *old_crtc_state =
1743                 intel_atomic_get_old_crtc_state(state, crtc);
1744         const struct drm_connector_state *old_conn_state;
1745         struct drm_connector *conn;
1746         int i;
1747
1748         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1749                 struct intel_encoder *encoder =
1750                         to_intel_encoder(old_conn_state->best_encoder);
1751
1752                 if (old_conn_state->crtc != &crtc->base)
1753                         continue;
1754
1755                 if (encoder->post_disable)
1756                         encoder->post_disable(state, encoder,
1757                                               old_crtc_state, old_conn_state);
1758         }
1759 }
1760
1761 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1762                                             struct intel_crtc *crtc)
1763 {
1764         const struct intel_crtc_state *old_crtc_state =
1765                 intel_atomic_get_old_crtc_state(state, crtc);
1766         const struct drm_connector_state *old_conn_state;
1767         struct drm_connector *conn;
1768         int i;
1769
1770         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1771                 struct intel_encoder *encoder =
1772                         to_intel_encoder(old_conn_state->best_encoder);
1773
1774                 if (old_conn_state->crtc != &crtc->base)
1775                         continue;
1776
1777                 if (encoder->post_pll_disable)
1778                         encoder->post_pll_disable(state, encoder,
1779                                                   old_crtc_state, old_conn_state);
1780         }
1781 }
1782
1783 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1784                                        struct intel_crtc *crtc)
1785 {
1786         const struct intel_crtc_state *crtc_state =
1787                 intel_atomic_get_new_crtc_state(state, crtc);
1788         const struct drm_connector_state *conn_state;
1789         struct drm_connector *conn;
1790         int i;
1791
1792         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1793                 struct intel_encoder *encoder =
1794                         to_intel_encoder(conn_state->best_encoder);
1795
1796                 if (conn_state->crtc != &crtc->base)
1797                         continue;
1798
1799                 if (encoder->update_pipe)
1800                         encoder->update_pipe(state, encoder,
1801                                              crtc_state, conn_state);
1802         }
1803 }
1804
1805 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1806 {
1807         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1808         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1809
1810         plane->disable_arm(plane, crtc_state);
1811 }
1812
/*
 * ILK/SNB/IVB CRTC enable sequence. The ordering of the steps below follows
 * the hardware modeset sequence and must not be rearranged.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Enabling an already-active CRTC indicates a state tracking bug. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        /*
         * Program the DP link M/N values: into the PCH transcoder when
         * going through the PCH, otherwise into the CPU transcoder.
         */
        if (intel_crtc_has_dp_encoder(new_crtc_state)) {
                if (new_crtc_state->has_pch_encoder)
                        intel_pch_transcoder_set_m_n(new_crtc_state,
                                                     &new_crtc_state->dp_m_n);
                else
                        intel_cpu_transcoder_set_m_n(new_crtc_state,
                                                     &new_crtc_state->dp_m_n,
                                                     &new_crtc_state->dp_m2_n2);
        }

        intel_set_transcoder_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        /* FDI M/N goes into the CPU transcoder when a PCH encoder is used. */
        if (new_crtc_state->has_pch_encoder)
                intel_cpu_transcoder_set_m_n(new_crtc_state,
                                             &new_crtc_state->fdi_m_n, NULL);

        ilk_set_pipeconf(new_crtc_state);

        crtc->active = true;

        intel_encoders_pre_enable(state, crtc);

        if (new_crtc_state->has_pch_encoder) {
                ilk_pch_pre_enable(state, crtc);
        } else {
                /* Without a PCH encoder FDI must be fully off. */
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        intel_initial_watermarks(state, crtc);
        intel_enable_transcoder(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                ilk_pch_enable(state, crtc);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev_priv, pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (new_crtc_state->has_pch_encoder) {
                intel_crtc_wait_for_next_vblank(crtc);
                intel_crtc_wait_for_next_vblank(crtc);
        }
        /* Re-arm underrun reporting now that enabling is complete. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1904
1905 /* IPS only exists on ULT machines and is tied to pipe A. */
1906 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
1907 {
1908         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
1909 }
1910
1911 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1912                                             enum pipe pipe, bool apply)
1913 {
1914         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1915         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1916
1917         if (apply)
1918                 val |= mask;
1919         else
1920                 val &= ~mask;
1921
1922         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1923 }
1924
/* Program the per-pipe MBus DBOX credit allocation. */
static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 val;

        /* Wa_22010947358:adl-p */
        if (IS_ALDERLAKE_P(dev_priv))
                val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
        else
                val = MBUS_DBOX_A_CREDIT(2);

        /* Display 12+ uses larger BW/B credit allocations than display 11. */
        if (DISPLAY_VER(dev_priv) >= 12) {
                val |= MBUS_DBOX_BW_CREDIT(2);
                val |= MBUS_DBOX_B_CREDIT(12);
        } else {
                val |= MBUS_DBOX_BW_CREDIT(1);
                val |= MBUS_DBOX_B_CREDIT(8);
        }

        intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
}
1947
1948 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1949 {
1950         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1951         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1952
1953         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1954                        HSW_LINETIME(crtc_state->linetime) |
1955                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
1956 }
1957
1958 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1959 {
1960         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1961         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1962         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1963         u32 val;
1964
1965         val = intel_de_read(dev_priv, reg);
1966         val &= ~HSW_FRAME_START_DELAY_MASK;
1967         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
1968         intel_de_write(dev_priv, reg, val);
1969 }
1970
1971 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1972                                          const struct intel_crtc_state *crtc_state)
1973 {
1974         struct intel_crtc_state *master_crtc_state;
1975         struct intel_crtc *master_crtc;
1976         struct drm_connector_state *conn_state;
1977         struct drm_connector *conn;
1978         struct intel_encoder *encoder = NULL;
1979         int i;
1980
1981         master_crtc = intel_master_crtc(crtc_state);
1982         master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
1983
1984         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1985                 if (conn_state->crtc != &master_crtc->base)
1986                         continue;
1987
1988                 encoder = to_intel_encoder(conn_state->best_encoder);
1989                 break;
1990         }
1991
1992         /*
1993          * Enable sequence steps 1-7 on bigjoiner master
1994          */
1995         if (crtc_state->bigjoiner_slave)
1996                 intel_encoders_pre_pll_enable(state, master_crtc);
1997
1998         if (crtc_state->shared_dpll)
1999                 intel_enable_shared_dpll(crtc_state);
2000
2001         if (crtc_state->bigjoiner_slave)
2002                 intel_encoders_pre_enable(state, master_crtc);
2003 }
2004
/* Program the CPU transcoder (timings, pixel multiplier, FDI M/N, config). */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        intel_set_transcoder_timings(crtc_state);

        /* The EDP transcoder has no PIPE_MULT register. */
        if (cpu_transcoder != TRANSCODER_EDP)
                intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
                               crtc_state->pixel_multiplier - 1);

        /* FDI M/N goes into the CPU transcoder when a PCH encoder is used. */
        if (crtc_state->has_pch_encoder)
                intel_cpu_transcoder_set_m_n(crtc_state,
                                             &crtc_state->fdi_m_n, NULL);

        hsw_set_frame_start_delay(crtc_state);

        hsw_set_transconf(crtc_state);
}
2025
/*
 * HSW+ CRTC enable sequence. The ordering of the steps below follows the
 * hardware modeset sequence and must not be rearranged.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        bool psl_clkgate_wa;

        /* Enabling an already-active CRTC indicates a state tracking bug. */
        if (drm_WARN_ON(&dev_priv->drm, crtc->active))
                return;

        /* Bigjoiner pipes run the encoder/DPLL steps on the master CRTC. */
        if (!new_crtc_state->bigjoiner) {
                intel_encoders_pre_pll_enable(state, crtc);

                if (new_crtc_state->shared_dpll)
                        intel_enable_shared_dpll(new_crtc_state);

                intel_encoders_pre_enable(state, crtc);
        } else {
                icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
        }

        intel_dsc_enable(new_crtc_state);

        if (DISPLAY_VER(dev_priv) >= 13)
                intel_uncompressed_joiner_enable(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);
        if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(new_crtc_state);

        /* Bigjoiner slaves and DSI transcoders skip transcoder programming. */
        if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder))
                hsw_configure_cpu_transcoder(new_crtc_state);

        crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk */
        psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (DISPLAY_VER(dev_priv) >= 9)
                skl_pfit_enable(new_crtc_state);
        else
                ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (DISPLAY_VER(dev_priv) < 9)
                intel_disable_primary_plane(new_crtc_state);

        hsw_set_linetime_wm(new_crtc_state);

        if (DISPLAY_VER(dev_priv) >= 11)
                icl_set_pipe_chicken(new_crtc_state);

        intel_initial_watermarks(state, crtc);

        if (DISPLAY_VER(dev_priv) >= 11) {
                const struct intel_dbuf_state *dbuf_state =
                                intel_atomic_get_new_dbuf_state(state);

                icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
        }

        if (new_crtc_state->bigjoiner_slave)
                intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        /* Undo WA #1180 after the first vblank. */
        if (psl_clkgate_wa) {
                intel_crtc_wait_for_next_vblank(crtc);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                struct intel_crtc *wa_crtc;

                wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

                intel_crtc_wait_for_next_vblank(wa_crtc);
                intel_crtc_wait_for_next_vblank(wa_crtc);
        }
}
2121
2122 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2123 {
2124         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2125         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2126         enum pipe pipe = crtc->pipe;
2127
2128         /* To avoid upsetting the power well on haswell only disable the pfit if
2129          * it's in use. The hw state code will make sure we get this right. */
2130         if (!old_crtc_state->pch_pfit.enabled)
2131                 return;
2132
2133         intel_de_write(dev_priv, PF_CTL(pipe), 0);
2134         intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
2135         intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
2136 }
2137
/*
 * ILK/SNB/IVB CRTC disable sequence. The ordering of the steps below
 * follows the hardware modeset sequence and must not be rearranged.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_transcoder(old_crtc_state);

        ilk_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ilk_pch_disable(state, crtc);

        intel_encoders_post_disable(state, crtc);

        if (old_crtc_state->has_pch_encoder)
                ilk_pch_post_disable(state, crtc);

        /* Re-arm underrun reporting now that everything is shut down. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
2173
2174 static void hsw_crtc_disable(struct intel_atomic_state *state,
2175                              struct intel_crtc *crtc)
2176 {
2177         const struct intel_crtc_state *old_crtc_state =
2178                 intel_atomic_get_old_crtc_state(state, crtc);
2179
2180         /*
2181          * FIXME collapse everything to one hook.
2182          * Need care with mst->ddi interactions.
2183          */
2184         if (!old_crtc_state->bigjoiner_slave) {
2185                 intel_encoders_disable(state, crtc);
2186                 intel_encoders_post_disable(state, crtc);
2187         }
2188 }
2189
/* Enable the GMCH panel fitter with the pre-computed ratios/control. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* Nothing to program when the fitter is not in use. */
        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
        assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

        /* Ratios must be written before the control/enable register. */
        intel_de_write(dev_priv, PFIT_PGM_RATIOS,
                       crtc_state->gmch_pfit.pgm_ratios);
        intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
2214
2215 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2216 {
2217         if (phy == PHY_NONE)
2218                 return false;
2219         else if (IS_DG2(dev_priv))
2220                 /*
2221                  * DG2 outputs labelled as "combo PHY" in the bspec use
2222                  * SNPS PHYs with completely different programming,
2223                  * hence we always return false here.
2224                  */
2225                 return false;
2226         else if (IS_ALDERLAKE_S(dev_priv))
2227                 return phy <= PHY_E;
2228         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2229                 return phy <= PHY_D;
2230         else if (IS_JSL_EHL(dev_priv))
2231                 return phy <= PHY_C;
2232         else if (DISPLAY_VER(dev_priv) >= 11)
2233                 return phy <= PHY_B;
2234         else
2235                 return false;
2236 }
2237
2238 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2239 {
2240         if (IS_DG2(dev_priv))
2241                 /* DG2's "TC1" output uses a SNPS PHY */
2242                 return false;
2243         else if (IS_ALDERLAKE_P(dev_priv))
2244                 return phy >= PHY_F && phy <= PHY_I;
2245         else if (IS_TIGERLAKE(dev_priv))
2246                 return phy >= PHY_D && phy <= PHY_I;
2247         else if (IS_ICELAKE(dev_priv))
2248                 return phy >= PHY_C && phy <= PHY_F;
2249         else
2250                 return false;
2251 }
2252
2253 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2254 {
2255         if (phy == PHY_NONE)
2256                 return false;
2257         else if (IS_DG2(dev_priv))
2258                 /*
2259                  * All four "combo" ports and the TC1 port (PHY E) use
2260                  * Synopsis PHYs.
2261                  */
2262                 return phy <= PHY_E;
2263
2264         return false;
2265 }
2266
2267 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2268 {
2269         if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2270                 return PHY_D + port - PORT_D_XELPD;
2271         else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2272                 return PHY_F + port - PORT_TC1;
2273         else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2274                 return PHY_B + port - PORT_TC1;
2275         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2276                 return PHY_C + port - PORT_TC1;
2277         else if (IS_JSL_EHL(i915) && port == PORT_D)
2278                 return PHY_A;
2279
2280         return PHY_A + port - PORT_A;
2281 }
2282
2283 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2284 {
2285         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2286                 return TC_PORT_NONE;
2287
2288         if (DISPLAY_VER(dev_priv) >= 12)
2289                 return TC_PORT_1 + port - PORT_TC1;
2290         else
2291                 return TC_PORT_1 + port - PORT_C;
2292 }
2293
/* Map a DDI port to the power domain covering its lanes. */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
        case PORT_H:
                return POWER_DOMAIN_PORT_DDI_H_LANES;
        case PORT_I:
                return POWER_DOMAIN_PORT_DDI_I_LANES;
        default:
                /* Unknown port: complain and fall back to the generic domain. */
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
2320
/*
 * Map a digital port's AUX channel to its power domain, selecting the
 * TBT variants when the port is in Thunderbolt alt mode.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
        if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
                switch (dig_port->aux_ch) {
                case AUX_CH_C:
                        return POWER_DOMAIN_AUX_C_TBT;
                case AUX_CH_D:
                        return POWER_DOMAIN_AUX_D_TBT;
                case AUX_CH_E:
                        return POWER_DOMAIN_AUX_E_TBT;
                case AUX_CH_F:
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
                case AUX_CH_H:
                        return POWER_DOMAIN_AUX_H_TBT;
                case AUX_CH_I:
                        return POWER_DOMAIN_AUX_I_TBT;
                default:
                        /* Unknown channel: complain and pick a TBT fallback. */
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
                }
        }

        return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
2348
2349 /*
2350  * Converts aux_ch to power_domain without caring about TBT ports for that use
2351  * intel_aux_power_domain()
2352  */
2353 enum intel_display_power_domain
2354 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
2355 {
2356         switch (aux_ch) {
2357         case AUX_CH_A:
2358                 return POWER_DOMAIN_AUX_A;
2359         case AUX_CH_B:
2360                 return POWER_DOMAIN_AUX_B;
2361         case AUX_CH_C:
2362                 return POWER_DOMAIN_AUX_C;
2363         case AUX_CH_D:
2364                 return POWER_DOMAIN_AUX_D;
2365         case AUX_CH_E:
2366                 return POWER_DOMAIN_AUX_E;
2367         case AUX_CH_F:
2368                 return POWER_DOMAIN_AUX_F;
2369         case AUX_CH_G:
2370                 return POWER_DOMAIN_AUX_G;
2371         case AUX_CH_H:
2372                 return POWER_DOMAIN_AUX_H;
2373         case AUX_CH_I:
2374                 return POWER_DOMAIN_AUX_I;
2375         default:
2376                 MISSING_CASE(aux_ch);
2377                 return POWER_DOMAIN_AUX_A;
2378         }
2379 }
2380
2381 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2382 {
2383         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2384         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2385         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2386         struct drm_encoder *encoder;
2387         enum pipe pipe = crtc->pipe;
2388         u64 mask;
2389
2390         if (!crtc_state->hw.active)
2391                 return 0;
2392
2393         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
2394         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
2395         if (crtc_state->pch_pfit.enabled ||
2396             crtc_state->pch_pfit.force_thru)
2397                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
2398
2399         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2400                                   crtc_state->uapi.encoder_mask) {
2401                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2402
2403                 mask |= BIT_ULL(intel_encoder->power_domain);
2404         }
2405
2406         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2407                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
2408
2409         if (crtc_state->shared_dpll)
2410                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
2411
2412         if (crtc_state->dsc.compression_enable)
2413                 mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
2414
2415         return mask;
2416 }
2417
2418 static u64
2419 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2420 {
2421         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2422         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2423         enum intel_display_power_domain domain;
2424         u64 domains, new_domains, old_domains;
2425
2426         domains = get_crtc_power_domains(crtc_state);
2427
2428         new_domains = domains & ~crtc->enabled_power_domains.mask;
2429         old_domains = crtc->enabled_power_domains.mask & ~domains;
2430
2431         for_each_power_domain(domain, new_domains)
2432                 intel_display_power_get_in_set(dev_priv,
2433                                                &crtc->enabled_power_domains,
2434                                                domain);
2435
2436         return old_domains;
2437 }
2438
2439 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2440                                            u64 domains)
2441 {
2442         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2443                                             &crtc->enabled_power_domains,
2444                                             domains);
2445 }
2446
/*
 * Pipe enable sequence for VLV/CHV: program transcoder state, bring up
 * the DPIO PLL, then encoders, pfit, LUTs, watermarks, and finally the
 * transcoder. The step ordering follows the hw programming sequence
 * and must not be rearranged.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active pipe would indicate broken state tracking. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/* DP needs its data/link M/N values programmed up front. */
	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->dp_m_n,
					     &new_crtc_state->dp_m2_n2);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B has a blender; select legacy blending and a zero canvas. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	/* CHV and VLV use different DPIO PLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
2500
/*
 * Pipe enable sequence for pre-ILK (non-VLV/CHV) platforms. Mirrors
 * valleyview_crtc_enable() minus the DPIO PLL handling; ordering of
 * the steps follows the hw programming sequence.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active pipe would indicate broken state tracking. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/* DP needs its data/link M/N values programmed up front. */
	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->dp_m_n,
					     &new_crtc_state->dp_m2_n2);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no CPU FIFO underrun reporting. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}
2550
2551 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2552 {
2553         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2554         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2555
2556         if (!old_crtc_state->gmch_pfit.control)
2557                 return;
2558
2559         assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2560
2561         drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2562                     intel_de_read(dev_priv, PFIT_CONTROL));
2563         intel_de_write(dev_priv, PFIT_CONTROL, 0);
2564 }
2565
/*
 * Pipe disable sequence for pre-ILK/VLV/CHV platforms; the inverse of
 * the corresponding crtc enable path, in strict hw ordering.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI: the pixel PLL is not touched here (presumably managed by the encoder). */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no CPU FIFO underrun reporting. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->wm_disp->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
2612
/*
 * Force a crtc off outside the normal atomic commit flow (used when
 * sanitizing the initial hw state), then bring the software tracking
 * (crtc state, cdclk/dbuf/bw global state, power domains) in line with
 * the now-disabled pipe.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* All visible planes must be off before the pipe goes down. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * ->crtc_disable() needs an atomic state; build a throwaway one
	 * covering this crtc and its connectors.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Wipe the stale uapi and hw crtc state. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach all encoders that were feeding this crtc. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(dev_priv);
	intel_disable_shared_dpll(crtc_state);

	/* Drop every power domain reference held on behalf of this crtc. */
	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Update the global state objects to account for the dead pipe. */
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
2696
2697 /*
2698  * turn all crtc's off, but do not adjust state
2699  * This has to be paired with a call to intel_modeset_setup_hw_state.
2700  */
2701 int intel_display_suspend(struct drm_device *dev)
2702 {
2703         struct drm_i915_private *dev_priv = to_i915(dev);
2704         struct drm_atomic_state *state;
2705         int ret;
2706
2707         if (!HAS_DISPLAY(dev_priv))
2708                 return 0;
2709
2710         state = drm_atomic_helper_suspend(dev);
2711         ret = PTR_ERR_OR_ZERO(state);
2712         if (ret)
2713                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2714                         ret);
2715         else
2716                 dev_priv->modeset_restore_state = state;
2717         return ret;
2718 }
2719
/* Tear down the base encoder, then free the containing intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
2727
/*
 * Cross check the actual hw state with our own modeset state tracking
 * (and its internal consistency).
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Connector is on in hw: it must have an active crtc attached. */
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors don't have a 1:1 encoder link; skip those checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Connector is off in hw: no active crtc may be attached. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
2766
2767 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
2768 {
2769         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2770         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2771
2772         /* IPS only exists on ULT machines and is tied to pipe A. */
2773         if (!hsw_crtc_supports_ips(crtc))
2774                 return false;
2775
2776         if (!dev_priv->params.enable_ips)
2777                 return false;
2778
2779         if (crtc_state->pipe_bpp > 24)
2780                 return false;
2781
2782         /*
2783          * We compare against max which means we must take
2784          * the increased cdclk requirement into account when
2785          * calculating the new cdclk.
2786          *
2787          * Should measure whether using a lower cdclk w/o IPS
2788          */
2789         if (IS_BROADWELL(dev_priv) &&
2790             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
2791                 return false;
2792
2793         return true;
2794 }
2795
2796 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
2797 {
2798         struct drm_i915_private *dev_priv =
2799                 to_i915(crtc_state->uapi.crtc->dev);
2800         struct intel_atomic_state *state =
2801                 to_intel_atomic_state(crtc_state->uapi.state);
2802
2803         crtc_state->ips_enabled = false;
2804
2805         if (!hsw_crtc_state_ips_capable(crtc_state))
2806                 return 0;
2807
2808         /*
2809          * When IPS gets enabled, the pipe CRC changes. Since IPS gets
2810          * enabled and disabled dynamically based on package C states,
2811          * user space can't make reliable use of the CRCs, so let's just
2812          * completely disable it.
2813          */
2814         if (crtc_state->crc_enabled)
2815                 return 0;
2816
2817         /* IPS should be fine as long as at least one plane is enabled. */
2818         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
2819                 return 0;
2820
2821         if (IS_BROADWELL(dev_priv)) {
2822                 const struct intel_cdclk_state *cdclk_state;
2823
2824                 cdclk_state = intel_atomic_get_cdclk_state(state);
2825                 if (IS_ERR(cdclk_state))
2826                         return PTR_ERR(cdclk_state);
2827
2828                 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
2829                 if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
2830                         return 0;
2831         }
2832
2833         crtc_state->ips_enabled = true;
2834
2835         return 0;
2836 }
2837
2838 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2839 {
2840         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2841
2842         /* GDG double wide on either pipe, otherwise pipe A only */
2843         return DISPLAY_VER(dev_priv) < 4 &&
2844                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2845 }
2846
2847 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2848 {
2849         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2850         struct drm_rect src;
2851
2852         /*
2853          * We only use IF-ID interlacing. If we ever use
2854          * PF-ID we'll need to adjust the pixel_rate here.
2855          */
2856
2857         if (!crtc_state->pch_pfit.enabled)
2858                 return pixel_rate;
2859
2860         drm_rect_init(&src, 0, 0,
2861                       crtc_state->pipe_src_w << 16,
2862                       crtc_state->pipe_src_h << 16);
2863
2864         return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2865                                    pixel_rate);
2866 }
2867
2868 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2869                                          const struct drm_display_mode *timings)
2870 {
2871         mode->hdisplay = timings->crtc_hdisplay;
2872         mode->htotal = timings->crtc_htotal;
2873         mode->hsync_start = timings->crtc_hsync_start;
2874         mode->hsync_end = timings->crtc_hsync_end;
2875
2876         mode->vdisplay = timings->crtc_vdisplay;
2877         mode->vtotal = timings->crtc_vtotal;
2878         mode->vsync_start = timings->crtc_vsync_start;
2879         mode->vsync_end = timings->crtc_vsync_end;
2880
2881         mode->flags = timings->flags;
2882         mode->type = DRM_MODE_TYPE_DRIVER;
2883
2884         mode->clock = timings->crtc_clock;
2885
2886         drm_mode_set_name(mode);
2887 }
2888
2889 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2890 {
2891         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2892
2893         if (HAS_GMCH(dev_priv))
2894                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2895                 crtc_state->pixel_rate =
2896                         crtc_state->hw.pipe_mode.crtc_clock;
2897         else
2898                 crtc_state->pixel_rate =
2899                         ilk_pipe_pixel_rate(crtc_state);
2900 }
2901
/*
 * Fill in the derived parts of the crtc state (pipe_mode, user mode,
 * pixel rate) from the freshly read out adjusted_mode and state flags.
 * Note the ordering: the bigjoiner halving is applied before the eDP
 * MSO splitter expansion.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		/* With MSO the adjusted_mode is derived from the full pipe_mode. */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	/* User mode: full (bigjoiner doubled) source size on the adjusted timings. */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
2955
/*
 * Read out the hw state via the encoder's ->get_config() hook, then
 * fill in the derived crtc state from what was read.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
2963
/*
 * Compute and validate the crtc-level config: derive pipe_mode from
 * adjusted_mode (bigjoiner halving, eDP MSO expansion), enforce the
 * dotclock/double-wide limits and the even pipe source width rules,
 * and compute the pixel rate. Returns 0 on success, -EINVAL if the
 * requested mode can't be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	/* eDP MSO: expand segment timings to the full mode, see h_full below. */
	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		/* h_full = (h_segment - pixel_overlap) * link_count */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders additionally need a valid FDI config. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
3050
3051 static void
3052 intel_reduce_m_n_ratio(u32 *num, u32 *den)
3053 {
3054         while (*num > DATA_LINK_M_N_MASK ||
3055                *den > DATA_LINK_M_N_MASK) {
3056                 *num >>= 1;
3057                 *den >>= 1;
3058         }
3059 }
3060
3061 static void compute_m_n(unsigned int m, unsigned int n,
3062                         u32 *ret_m, u32 *ret_n,
3063                         bool constant_n)
3064 {
3065         /*
3066          * Several DP dongles in particular seem to be fussy about
3067          * too large link M/N values. Give N value as 0x8000 that
3068          * should be acceptable by specific devices. 0x8000 is the
3069          * specified fixed N value for asynchronous clock mode,
3070          * which the devices expect also in synchronous clock mode.
3071          */
3072         if (constant_n)
3073                 *ret_n = DP_LINK_CONSTANT_N_VALUE;
3074         else
3075                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
3076
3077         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
3078         intel_reduce_m_n_ratio(ret_m, ret_n);
3079 }
3080
3081 void
3082 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
3083                        int pixel_clock, int link_clock,
3084                        struct intel_link_m_n *m_n,
3085                        bool constant_n, bool fec_enable)
3086 {
3087         u32 data_clock = bits_per_pixel * pixel_clock;
3088
3089         if (fec_enable)
3090                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
3091
3092         m_n->tu = 64;
3093         compute_m_n(data_clock,
3094                     link_clock * nlanes * 8,
3095                     &m_n->data_m, &m_n->data_n,
3096                     constant_n);
3097
3098         compute_m_n(pixel_clock, link_clock,
3099                     &m_n->link_m, &m_n->link_n,
3100                     constant_n);
3101 }
3102
3103 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
3104 {
3105         /*
3106          * There may be no VBT; and if the BIOS enabled SSC we can
3107          * just keep using it to avoid unnecessary flicker.  Whereas if the
3108          * BIOS isn't using it, don't assume it will work even if the VBT
3109          * indicates as much.
3110          */
3111         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
3112                 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
3113                                                        PCH_DREF_CONTROL) &
3114                         DREF_SSC1_ENABLE;
3115
3116                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
3117                         drm_dbg_kms(&dev_priv->drm,
3118                                     "SSC %s by BIOS, overriding VBT which says %s\n",
3119                                     enableddisabled(bios_lvds_use_ssc),
3120                                     enableddisabled(dev_priv->vbt.lvds_use_ssc));
3121                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
3122                 }
3123         }
3124 }
3125
/*
 * Program one full set of data/link M/N registers.
 * The TU size is carried in the high bits of the data M register.
 */
static void intel_set_m_n(struct drm_i915_private *i915,
			  const struct intel_link_m_n *m_n,
			  i915_reg_t data_m_reg, i915_reg_t data_n_reg,
			  i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	intel_de_write(i915, link_n_reg, m_n->link_n);
}
3136
3137 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3138                                          const struct intel_link_m_n *m_n)
3139 {
3140         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3141         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3142         enum pipe pipe = crtc->pipe;
3143
3144         intel_set_m_n(dev_priv, m_n,
3145                       PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
3146                       PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
3147 }
3148
3149 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
3150                                  enum transcoder transcoder)
3151 {
3152         if (IS_HASWELL(dev_priv))
3153                 return transcoder == TRANSCODER_EDP;
3154
3155         /*
3156          * Strictly speaking some registers are available before
3157          * gen7, but we only support DRRS on gen7+
3158          */
3159         return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
3160 }
3161
3162 void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3163                                   const struct intel_link_m_n *m_n,
3164                                   const struct intel_link_m_n *m2_n2)
3165 {
3166         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3167         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3168         enum pipe pipe = crtc->pipe;
3169         enum transcoder transcoder = crtc_state->cpu_transcoder;
3170
3171         if (DISPLAY_VER(dev_priv) >= 5) {
3172                 intel_set_m_n(dev_priv, m_n,
3173                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3174                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3175                 /*
3176                  *  M2_N2 registers are set only if DRRS is supported
3177                  * (to make sure the registers are not unnecessarily accessed).
3178                  */
3179                 if (m2_n2 && crtc_state->has_drrs &&
3180                     transcoder_has_m2_n2(dev_priv, transcoder)) {
3181                         intel_set_m_n(dev_priv, m2_n2,
3182                                       PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3183                                       PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3184                 }
3185         } else {
3186                 intel_set_m_n(dev_priv, m_n,
3187                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3188                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3189         }
3190 }
3191
/*
 * Program the transcoder timing registers (H/V total, blank, sync)
 * from the adjusted mode. All timing registers hold value-minus-one.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Wrap a negative shift back into the 0..htotal range. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* Horizontal timings: low 16 bits = start/display, high 16 = end/total. */
	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	/* Vertical timings use the locally adjusted vtotal/vblank_end. */
	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
3249
/* Program the pipe source size register from the crtc state. */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	/* The register fields store width/height minus one. */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(crtc_state->pipe_src_w - 1) |
		       PIPESRC_HEIGHT(crtc_state->pipe_src_h - 1));
}
3263
3264 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3265 {
3266         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3267         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3268
3269         if (DISPLAY_VER(dev_priv) == 2)
3270                 return false;
3271
3272         if (DISPLAY_VER(dev_priv) >= 9 ||
3273             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3274                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3275         else
3276                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
3277 }
3278
/*
 * Read back the transcoder timings into the adjusted mode.
 * All timing registers store value-minus-one, hence the +1s; the low
 * 16 bits hold the start/display value and the high 16 the end/total.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* hblank is not read out for DSI transcoders. */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	/* vblank is not read out for DSI transcoders. */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/*
	 * Undo the -1 applied at programming time for interlaced modes
	 * (see intel_set_transcoder_timings()).
	 */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
3323
3324 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3325                                     struct intel_crtc_state *pipe_config)
3326 {
3327         struct drm_device *dev = crtc->base.dev;
3328         struct drm_i915_private *dev_priv = to_i915(dev);
3329         u32 tmp;
3330
3331         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3332         pipe_config->pipe_src_w = REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1;
3333         pipe_config->pipe_src_h = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1;
3334 }
3335
/* Assemble and program PIPECONF for GMCH (pre-ilk) style pipes. */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		/* pipe_bpp is total bits per pixel, i.e. 3x the per-channel bpc */
		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_BPC_6;
			break;
		case 24:
			pipeconf |= PIPECONF_BPC_8;
			break;
		case 30:
			pipeconf |= PIPECONF_BPC_10;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Pre-gen4 and SDVO outputs need the field indication variant. */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
3396
3397 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3398 {
3399         if (IS_I830(dev_priv))
3400                 return false;
3401
3402         return DISPLAY_VER(dev_priv) >= 4 ||
3403                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3404 }
3405
3406 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
3407 {
3408         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3409         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3410         u32 tmp;
3411
3412         if (!i9xx_has_pfit(dev_priv))
3413                 return;
3414
3415         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
3416         if (!(tmp & PFIT_ENABLE))
3417                 return;
3418
3419         /* Check whether the pfit is attached to our pipe. */
3420         if (DISPLAY_VER(dev_priv) < 4) {
3421                 if (crtc->pipe != PIPE_B)
3422                         return;
3423         } else {
3424                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3425                         return;
3426         }
3427
3428         crtc_state->gmch_pfit.control = tmp;
3429         crtc_state->gmch_pfit.pgm_ratios =
3430                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
3431 }
3432
/*
 * Read the VLV DPLL dividers over the DPIO sideband and compute
 * the resulting port clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the divider fields from the PLL dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
3459
/*
 * Read the CHV DPLL dividers over the DPIO sideband and compute
 * the resulting port clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* M2 integer part in dw0; fractional part in dw2 when enabled. */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
3493
3494 static enum intel_output_format
3495 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3496 {
3497         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3498         u32 tmp;
3499
3500         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3501
3502         if (tmp & PIPEMISC_YUV420_ENABLE) {
3503                 /* We support 4:2:0 in full blend mode only */
3504                 drm_WARN_ON(&dev_priv->drm,
3505                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3506
3507                 return INTEL_OUTPUT_FORMAT_YCBCR420;
3508         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3509                 return INTEL_OUTPUT_FORMAT_YCBCR444;
3510         } else {
3511                 return INTEL_OUTPUT_FORMAT_RGB;
3512         }
3513 }
3514
3515 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3516 {
3517         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3518         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3519         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3520         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3521         u32 tmp;
3522
3523         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3524
3525         if (tmp & DISP_PIPE_GAMMA_ENABLE)
3526                 crtc_state->gamma_enable = true;
3527
3528         if (!HAS_GMCH(dev_priv) &&
3529             tmp & DISP_PIPE_CSC_ENABLE)
3530                 crtc_state->csc_enable = true;
3531 }
3532
/*
 * Read out the full pipe state on GMCH (pre-ilk) style platforms.
 * Returns true and fills out @pipe_config when the pipe is powered
 * and enabled, false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Bail if the pipe's power domain is off; hold a wakeref while reading. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* On these platforms the cpu transcoder maps 1:1 onto the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/vlv/chv have bpc fields in PIPECONF (see i9xx_set_pipeconf). */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Compute port_clock from the platform's DPLL state. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3654
/* Assemble and program PIPECONF for ilk-style pipes. */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	/* pipe_bpp is total bits per pixel, i.e. 3x the per-channel bpc */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_BPC_6;
		break;
	case 24:
		val |= PIPECONF_BPC_8;
		break;
	case 30:
		val |= PIPECONF_BPC_10;
		break;
	case 36:
		val |= PIPECONF_BPC_12;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACE_IF_ID_ILK;
	else
		val |= PIPECONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* SDVO outputs do not get the pipe-level color range bit. */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
3711
3712 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3713 {
3714         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3715         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3716         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3717         u32 val = 0;
3718
3719         if (IS_HASWELL(dev_priv) && crtc_state->dither)
3720                 val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
3721
3722         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3723                 val |= PIPECONF_INTERLACE_IF_ID_ILK;
3724         else
3725                 val |= PIPECONF_INTERLACE_PF_PD_ILK;
3726
3727         if (IS_HASWELL(dev_priv) &&
3728             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3729                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3730
3731         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3732         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
3733 }
3734
/* Assemble and program PIPEMISC (bpc, dithering, output colorspace etc.). */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;
	int i;

	/* pipe_bpp is total bits per pixel, i.e. 3x the per-channel bpc */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_BPC_6;
		break;
	case 24:
		val |= PIPEMISC_BPC_8;
		break;
	case 30:
		val |= PIPEMISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 is only programmed together with full blend mode. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	/* ADL-P: select the bubble counter mode based on scaler usage. */
	if (IS_ALDERLAKE_P(dev_priv)) {
		bool scaler_in_use = false;

		for (i = 0; i < crtc->num_scalers; i++) {
			if (!scaler_state->scalers[i].in_use)
				continue;

			scaler_in_use = true;
			break;
		}

		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
			     PIPE_MISC2_BUBBLE_COUNTER_MASK,
			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
	}

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
3801
/*
 * Read back pipe_bpp (total bits per pixel) from the PIPEMISC bpc
 * field. Returns 0 for values we cannot decode on this platform.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_BPC_6:
		return 18;
	case PIPEMISC_BPC_8:
		return 24;
	case PIPEMISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
3835
/*
 * Compute how many FDI/link lanes are needed to carry the given
 * pixel stream.
 *
 * @target_clock: pixel clock (kHz)
 * @link_bw: per-lane link bandwidth (kHz)
 * @bpp: bits per pixel
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bps = link_bw * 8;

	/* Round up: a partially used lane still occupies a full lane. */
	return (bps + lane_bps - 1) / lane_bps;
}
3846
3847 static void intel_get_m_n(struct drm_i915_private *i915,
3848                           struct intel_link_m_n *m_n,
3849                           i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3850                           i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3851 {
3852         m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3853         m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3854         m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3855         m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3856         m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
3857 }
3858
3859 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
3860                                          struct intel_link_m_n *m_n)
3861 {
3862         struct drm_device *dev = crtc->base.dev;
3863         struct drm_i915_private *dev_priv = to_i915(dev);
3864         enum pipe pipe = crtc->pipe;
3865
3866         intel_get_m_n(dev_priv, m_n,
3867                       PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
3868                       PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
3869 }
3870
3871 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
3872                                          enum transcoder transcoder,
3873                                          struct intel_link_m_n *m_n,
3874                                          struct intel_link_m_n *m2_n2)
3875 {
3876         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3877         enum pipe pipe = crtc->pipe;
3878
3879         if (DISPLAY_VER(dev_priv) >= 5) {
3880                 intel_get_m_n(dev_priv, m_n,
3881                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3882                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3883
3884                 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
3885                         intel_get_m_n(dev_priv, m2_n2,
3886                                       PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3887                                       PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3888                 }
3889         } else {
3890                 intel_get_m_n(dev_priv, m_n,
3891                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3892                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3893         }
3894 }
3895
3896 void intel_dp_get_m_n(struct intel_crtc *crtc,
3897                       struct intel_crtc_state *pipe_config)
3898 {
3899         if (pipe_config->has_pch_encoder)
3900                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
3901         else
3902                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3903                                              &pipe_config->dp_m_n,
3904                                              &pipe_config->dp_m2_n2);
3905 }
3906
/* Read back the FDI M/N values from the CPU transcoder; FDI has no M2/N2. */
void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
                            struct intel_crtc_state *pipe_config)
{
        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
                                     &pipe_config->fdi_m_n, NULL);
}
3913
3914 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3915                                   u32 pos, u32 size)
3916 {
3917         drm_rect_init(&crtc_state->pch_pfit.dst,
3918                       pos >> 16, pos & 0xffff,
3919                       size >> 16, size & 0xffff);
3920 }
3921
/*
 * Read out the SKL+ pipe scaler ("panel fitter") state.
 *
 * Finds the first scaler that is enabled with no plane selected
 * (i.e. a pipe scaler), records its destination window, and updates
 * the scaler ownership bookkeeping in the crtc state.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
        int id = -1;
        int i;

        /* find scaler attached to this pipe */
        for (i = 0; i < crtc->num_scalers; i++) {
                u32 ctl, pos, size;

                ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
                /* skip scalers that are disabled or bound to a plane */
                if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
                        continue;

                id = i;
                crtc_state->pch_pfit.enabled = true;

                pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
                size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

                ilk_get_pfit_pos_size(crtc_state, pos, size);

                scaler_state->scalers[i].in_use = true;
                break;
        }

        /* id == -1 means no pipe scaler is in use; release our claim. */
        scaler_state->scaler_id = id;
        if (id >= 0)
                scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
        else
                scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
3956
/*
 * Read out the ILK-style pipe panel fitter (PF) state.
 *
 * Records whether the panel fitter is enabled and, if so, its
 * destination window position and size in the crtc state.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 ctl, pos, size;

        ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
        if ((ctl & PF_ENABLE) == 0)
                return;

        crtc_state->pch_pfit.enabled = true;

        pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
        size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

        ilk_get_pfit_pos_size(crtc_state, pos, size);

        /*
         * We currently do not free assignements of panel fitters on
         * ivb/hsw (since we don't use the higher upscaling modes which
         * differentiates them) so just WARN about this case for now.
         */
        drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
                    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
3982
/*
 * Read out the full pipe hardware state on ILK-IVB style platforms.
 *
 * Returns true iff the pipe is enabled; on success @pipe_config is
 * filled from the hardware registers. The pipe power domain is held
 * for the duration of the readout.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        /* On these platforms the cpu transcoder is tied 1:1 to the pipe. */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;
        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Decode pipe bpp from the PIPECONF BPC field. */
        switch (tmp & PIPECONF_BPC_MASK) {
        case PIPECONF_BPC_6:
                pipe_config->pipe_bpp = 18;
                break;
        case PIPECONF_BPC_8:
                pipe_config->pipe_bpp = 24;
                break;
        case PIPECONF_BPC_10:
                pipe_config->pipe_bpp = 30;
                break;
        case PIPECONF_BPC_12:
                pipe_config->pipe_bpp = 36;
                break;
        default:
                break;
        }

        if (tmp & PIPECONF_COLOR_RANGE_SELECT)
                pipe_config->limited_color_range = true;

        /* Both YCbCr encodings read back as the same output format. */
        switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
        case PIPECONF_OUTPUT_COLORSPACE_YUV601:
        case PIPECONF_OUTPUT_COLORSPACE_YUV709:
                pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                break;
        default:
                pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
                break;
        }

        pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);

        pipe_config->csc_mode = intel_de_read(dev_priv,
                                              PIPE_CSC_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        /* No pixel multiplier readout on these platforms. */
        pipe_config->pixel_multiplier = 1;

        ilk_pch_get_config(pipe_config);

        intel_get_transcoder_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        ilk_get_pfit_config(pipe_config);

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
4060
4061 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
4062 {
4063         if (DISPLAY_VER(i915) >= 12)
4064                 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
4065         else if (DISPLAY_VER(i915) >= 11)
4066                 return BIT(PIPE_B) | BIT(PIPE_C);
4067         else
4068                 return 0;
4069 }
4070
/*
 * Check whether the DDI function of @cpu_transcoder is enabled.
 *
 * TRANS_DDI_FUNC_CTL is only read when the transcoder's power domain
 * is already enabled; a powered-down transcoder reads as disabled
 * (tmp stays 0).
 */
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
                                           enum transcoder cpu_transcoder)
{
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp = 0;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

        with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
                tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

        return tmp & TRANS_DDI_FUNC_ENABLE;
}
4085
/*
 * Return the bitmask of pipes currently acting as bigjoiner slaves.
 *
 * Two readout paths: the DSC-based bigjoiner bits in ICL_PIPE_DSS_CTL1
 * (behind the DSC power domain), and on display 13+ additionally the
 * uncompressed joiner bits (behind the pipe power domain). Warns if
 * the master/slave masks are not consecutive pairs.
 */
static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
{
        u8 master_pipes = 0, slave_pipes = 0;
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                enum intel_display_power_domain power_domain;
                enum pipe pipe = crtc->pipe;
                intel_wakeref_t wakeref;

                /* skip pipes that can't be part of a bigjoiner pair */
                if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
                        continue;

                power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
                with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
                        u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

                        if (!(tmp & BIG_JOINER_ENABLE))
                                continue;

                        if (tmp & MASTER_BIG_JOINER_ENABLE)
                                master_pipes |= BIT(pipe);
                        else
                                slave_pipes |= BIT(pipe);
                }

                /* uncompressed joiner readout only exists on display 13+ */
                if (DISPLAY_VER(dev_priv) < 13)
                        continue;

                power_domain = POWER_DOMAIN_PIPE(pipe);
                with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
                        u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

                        if (tmp & UNCOMPRESSED_JOINER_MASTER)
                                master_pipes |= BIT(pipe);
                        if (tmp & UNCOMPRESSED_JOINER_SLAVE)
                                slave_pipes |= BIT(pipe);
                }
        }

        /* Bigjoiner pipes should always be consecutive master and slave */
        drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
                 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
                 master_pipes, slave_pipes);

        return slave_pipes;
}
4133
4134 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
4135 {
4136         u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4137
4138         if (DISPLAY_VER(i915) >= 11)
4139                 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4140
4141         return panel_transcoder_mask;
4142 }
4143
/*
 * Return the bitmask of enabled transcoders currently feeding this crtc's
 * pipe: panel transcoders (eDP/DSI) whose EDP_INPUT field selects the pipe,
 * the pipe's own transcoder, and - for a bigjoiner slave - the master
 * pipe's transcoder.
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
        enum transcoder cpu_transcoder;
        u8 enabled_transcoders = 0;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
                                       panel_transcoder_mask) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t wakeref;
                enum pipe trans_pipe;
                u32 tmp = 0;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
                        tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /* decode which pipe this panel transcoder is driving */
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        drm_WARN(dev, 1,
                                 "unknown pipe linked to transcoder %s\n",
                                 transcoder_name(cpu_transcoder));
                        fallthrough;
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                case TRANS_DDI_EDP_INPUT_D_ONOFF:
                        trans_pipe = PIPE_D;
                        break;
                }

                if (trans_pipe == crtc->pipe)
                        enabled_transcoders |= BIT(cpu_transcoder);
        }

        /* single pipe or bigjoiner master */
        cpu_transcoder = (enum transcoder) crtc->pipe;
        if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
                enabled_transcoders |= BIT(cpu_transcoder);

        /* bigjoiner slave -> consider the master pipe's transcoder as well */
        if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
                /* master is the pipe immediately before the slave */
                cpu_transcoder = (enum transcoder) crtc->pipe - 1;
                if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
                        enabled_transcoders |= BIT(cpu_transcoder);
        }

        return enabled_transcoders;
}
4209
4210 static bool has_edp_transcoders(u8 enabled_transcoders)
4211 {
4212         return enabled_transcoders & BIT(TRANSCODER_EDP);
4213 }
4214
4215 static bool has_dsi_transcoders(u8 enabled_transcoders)
4216 {
4217         return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4218                                       BIT(TRANSCODER_DSI_1));
4219 }
4220
4221 static bool has_pipe_transcoders(u8 enabled_transcoders)
4222 {
4223         return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4224                                        BIT(TRANSCODER_DSI_0) |
4225                                        BIT(TRANSCODER_DSI_1));
4226 }
4227
4228 static void assert_enabled_transcoders(struct drm_i915_private *i915,
4229                                        u8 enabled_transcoders)
4230 {
4231         /* Only one type of transcoder please */
4232         drm_WARN_ON(&i915->drm,
4233                     has_edp_transcoders(enabled_transcoders) +
4234                     has_dsi_transcoders(enabled_transcoders) +
4235                     has_pipe_transcoders(enabled_transcoders) > 1);
4236
4237         /* Only DSI transcoders can be ganged */
4238         drm_WARN_ON(&i915->drm,
4239                     !has_dsi_transcoders(enabled_transcoders) &&
4240                     !is_power_of_2(enabled_transcoders));
4241 }
4242
/*
 * Determine which cpu transcoder drives this crtc and whether the pipe
 * is enabled.
 *
 * Grabs the transcoder power domain into @power_domain_set (the caller
 * releases it). Returns false if no transcoder is enabled for this crtc
 * or its power domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     struct intel_display_power_domain_set *power_domain_set)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long enabled_transcoders;
        u32 tmp;

        enabled_transcoders = hsw_enabled_transcoders(crtc);
        if (!enabled_transcoders)
                return false;

        assert_enabled_transcoders(dev_priv, enabled_transcoders);

        /*
         * With the exception of DSI we should only ever have
         * a single enabled transcoder. With DSI let's just
         * pick the first one.
         */
        pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

        if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
                                                       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
                return false;

        if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

                /*
                 * A panel transcoder in "A on/off" mode implies the
                 * pch pfit force-through path is in use.
                 */
                if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
                        pipe_config->pch_pfit.force_thru = true;
        }

        tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
4280
/*
 * Determine whether a BXT/GLK DSI transcoder is driving this crtc.
 *
 * Checks both DSI ports (A -> TRANSCODER_DSI_A, C -> TRANSCODER_DSI_C)
 * and, if one is enabled and routed to this pipe, records it as the
 * crtc's cpu transcoder. Power domains taken are added to
 * @power_domain_set for the caller to release.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         struct intel_display_power_domain_set *power_domain_set)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
                                                               POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
                        continue;

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* skip the port if it's routed to a different pipe */
                tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
4326
/*
 * Read out the full pipe hardware state on HSW+ platforms.
 *
 * Returns true iff the pipe is active; on success @pipe_config is
 * filled from the hardware registers. All power domains needed for
 * the readout are collected in a local set and released on exit.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_display_power_domain_set power_domain_set = { };
        bool active;
        u32 tmp;

        if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
                                                       POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

        /* BXT/GLK DSI is a separate readout path from the DDI transcoders. */
        if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
                drm_WARN_ON(&dev_priv->drm, active);
                active = true;
        }

        if (!active)
                goto out;

        intel_dsc_get_config(pipe_config);
        if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
                intel_uncompressed_joiner_get_config(pipe_config);

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            DISPLAY_VER(dev_priv) >= 11)
                intel_get_transcoder_timings(crtc, pipe_config);

        if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
                intel_vrr_get_config(crtc, pipe_config);

        intel_get_pipe_src_size(crtc, pipe_config);

        /* Output format lives in PIPECONF on HSW, in PIPEMISC on BDW+. */
        if (IS_HASWELL(dev_priv)) {
                u32 tmp = intel_de_read(dev_priv,
                                        PIPECONF(pipe_config->cpu_transcoder));

                if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipemisc_output_format(crtc);
        }

        pipe_config->gamma_mode = intel_de_read(dev_priv,
                                                GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = intel_de_read(dev_priv,
                                              PIPE_CSC_MODE(crtc->pipe));

        if (DISPLAY_VER(dev_priv) >= 9) {
                tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
        pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
        if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                pipe_config->ips_linetime =
                        REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

        /* pfit state is only readable when its power domain is enabled */
        if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
                                                      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
                if (DISPLAY_VER(dev_priv) >= 9)
                        skl_get_pfit_config(pipe_config);
                else
                        ilk_get_pfit_config(pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = intel_de_read(dev_priv,
                                                                 IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        /* Pixel multiplier is only readable on the per-pipe transcoders. */
        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        intel_de_read(dev_priv,
                                      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

out:
        intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

        return active;
}
4440
4441 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4442 {
4443         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4444         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4445
4446         if (!i915->display->get_pipe_config(crtc, crtc_state))
4447                 return false;
4448
4449         crtc_state->hw.active = true;
4450
4451         intel_crtc_readout_derived_state(crtc_state);
4452
4453         return true;
4454 }
4455
/*
 * VESA 640x480x72Hz mode to set on the pipe for load detection
 * (used by intel_get_load_detect_pipe()).
 */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
4461
4462 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4463                                         struct drm_crtc *crtc)
4464 {
4465         struct drm_plane *plane;
4466         struct drm_plane_state *plane_state;
4467         int ret, i;
4468
4469         ret = drm_atomic_add_affected_planes(state, crtc);
4470         if (ret)
4471                 return ret;
4472
4473         for_each_new_plane_in_state(state, plane, plane_state, i) {
4474                 if (plane_state->crtc != crtc)
4475                         continue;
4476
4477                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4478                 if (ret)
4479                         return ret;
4480
4481                 drm_atomic_set_fb_for_plane(plane_state, NULL);
4482         }
4483
4484         return 0;
4485 }
4486
4487 int intel_get_load_detect_pipe(struct drm_connector *connector,
4488                                struct intel_load_detect_pipe *old,
4489                                struct drm_modeset_acquire_ctx *ctx)
4490 {
4491         struct intel_encoder *encoder =
4492                 intel_attached_encoder(to_intel_connector(connector));
4493         struct intel_crtc *possible_crtc;
4494         struct intel_crtc *crtc = NULL;
4495         struct drm_device *dev = encoder->base.dev;
4496         struct drm_i915_private *dev_priv = to_i915(dev);
4497         struct drm_mode_config *config = &dev->mode_config;
4498         struct drm_atomic_state *state = NULL, *restore_state = NULL;
4499         struct drm_connector_state *connector_state;
4500         struct intel_crtc_state *crtc_state;
4501         int ret;
4502
4503         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4504                     connector->base.id, connector->name,
4505                     encoder->base.base.id, encoder->base.name);
4506
4507         old->restore_state = NULL;
4508
4509         drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
4510
4511         /*
4512          * Algorithm gets a little messy:
4513          *
4514          *   - if the connector already has an assigned crtc, use it (but make
4515          *     sure it's on first)
4516          *
4517          *   - try to find the first unused crtc that can drive this connector,
4518          *     and use that if we find one
4519          */
4520
4521         /* See if we already have a CRTC for this connector */
4522         if (connector->state->crtc) {
4523                 crtc = to_intel_crtc(connector->state->crtc);
4524
4525                 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4526                 if (ret)
4527                         goto fail;
4528
4529                 /* Make sure the crtc and connector are running */
4530                 goto found;
4531         }
4532
4533         /* Find an unused one (if possible) */
4534         for_each_intel_crtc(dev, possible_crtc) {
4535                 if (!(encoder->base.possible_crtcs &
4536                       drm_crtc_mask(&possible_crtc->base)))
4537                         continue;
4538
4539                 ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
4540                 if (ret)
4541                         goto fail;
4542
4543                 if (possible_crtc->base.state->enable) {
4544                         drm_modeset_unlock(&possible_crtc->base.mutex);
4545                         continue;
4546                 }
4547
4548                 crtc = possible_crtc;
4549                 break;
4550         }
4551
4552         /*
4553          * If we didn't find an unused CRTC, don't use any.
4554          */
4555         if (!crtc) {
4556                 drm_dbg_kms(&dev_priv->drm,
4557                             "no pipe available for load-detect\n");
4558                 ret = -ENODEV;
4559                 goto fail;
4560         }
4561
4562 found:
4563         state = drm_atomic_state_alloc(dev);
4564         restore_state = drm_atomic_state_alloc(dev);
4565         if (!state || !restore_state) {
4566                 ret = -ENOMEM;
4567                 goto fail;
4568         }
4569
4570         state->acquire_ctx = ctx;
4571         restore_state->acquire_ctx = ctx;
4572
4573         connector_state = drm_atomic_get_connector_state(state, connector);
4574         if (IS_ERR(connector_state)) {
4575                 ret = PTR_ERR(connector_state);
4576                 goto fail;
4577         }
4578
4579         ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
4580         if (ret)
4581                 goto fail;
4582
4583         crtc_state = intel_atomic_get_crtc_state(state, crtc);
4584         if (IS_ERR(crtc_state)) {
4585                 ret = PTR_ERR(crtc_state);
4586                 goto fail;
4587         }
4588
4589         crtc_state->uapi.active = true;
4590
4591         ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
4592                                            &load_detect_mode);
4593         if (ret)
4594                 goto fail;
4595
4596         ret = intel_modeset_disable_planes(state, &crtc->base);
4597         if (ret)
4598                 goto fail;
4599
4600         ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
4601         if (!ret)
4602                 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
4603         if (!ret)
4604                 ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
4605         if (ret) {
4606                 drm_dbg_kms(&dev_priv->drm,
4607                             "Failed to create a copy of old state to restore: %i\n",
4608                             ret);
4609                 goto fail;
4610         }
4611
4612         ret = drm_atomic_commit(state);
4613         if (ret) {
4614                 drm_dbg_kms(&dev_priv->drm,
4615                             "failed to set mode on load-detect pipe\n");
4616                 goto fail;
4617         }
4618
4619         old->restore_state = restore_state;
4620         drm_atomic_state_put(state);
4621
4622         /* let the connector get through one full cycle before testing */
4623         intel_crtc_wait_for_next_vblank(crtc);
4624
4625         return true;
4626
4627 fail:
4628         if (state) {
4629                 drm_atomic_state_put(state);
4630                 state = NULL;
4631         }
4632         if (restore_state) {
4633                 drm_atomic_state_put(restore_state);
4634                 restore_state = NULL;
4635         }
4636
4637         if (ret == -EDEADLK)
4638                 return ret;
4639
4640         return false;
4641 }
4642
4643 void intel_release_load_detect_pipe(struct drm_connector *connector,
4644                                     struct intel_load_detect_pipe *old,
4645                                     struct drm_modeset_acquire_ctx *ctx)
4646 {
4647         struct intel_encoder *intel_encoder =
4648                 intel_attached_encoder(to_intel_connector(connector));
4649         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4650         struct drm_encoder *encoder = &intel_encoder->base;
4651         struct drm_atomic_state *state = old->restore_state;
4652         int ret;
4653
4654         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4655                     connector->base.id, connector->name,
4656                     encoder->base.id, encoder->name);
4657
4658         if (!state)
4659                 return;
4660
4661         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4662         if (ret)
4663                 drm_dbg_kms(&i915->drm,
4664                             "Couldn't release load detect pipe: %i\n", ret);
4665         drm_atomic_state_put(state);
4666 }
4667
4668 static int i9xx_pll_refclk(struct drm_device *dev,
4669                            const struct intel_crtc_state *pipe_config)
4670 {
4671         struct drm_i915_private *dev_priv = to_i915(dev);
4672         u32 dpll = pipe_config->dpll_hw_state.dpll;
4673
4674         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4675                 return dev_priv->vbt.lvds_ssc_freq;
4676         else if (HAS_PCH_SPLIT(dev_priv))
4677                 return 120000;
4678         else if (DISPLAY_VER(dev_priv) != 2)
4679                 return 96000;
4680         else
4681                 return 48000;
4682 }
4683
/*
 * Returns the clock of the currently programmed mode of the given pipe,
 * reconstructed from the DPLL/FP register values captured in @pipe_config.
 * The result is stored in pipe_config->port_clock (includes the pixel
 * multiplier); adjusted_mode.crtc_clock is derived from it later in the
 * encoder's get_config().
 */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor register (FP0/FP1) the DPLL selected. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Extract M1/N/M2; Pineview uses different field encodings. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		/* gen3+: P1 is a one-hot bitfield (hence ffs()). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Unrecognized mode: leave port_clock untouched. */
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: different P1/P2 encoding, with an i85x LVDS special case. */
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
4776
4777 int intel_dotclock_calculate(int link_freq,
4778                              const struct intel_link_m_n *m_n)
4779 {
4780         /*
4781          * The calculation for the data clock is:
4782          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4783          * But we want to avoid losing precison if possible, so:
4784          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4785          *
4786          * and the link clock is simpler:
4787          * link_clock = (m * link_clock) / n
4788          */
4789
4790         if (!m_n->link_n)
4791                 return 0;
4792
4793         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
4794 }
4795
4796 /* Returns the currently programmed mode of the given encoder. */
4797 struct drm_display_mode *
4798 intel_encoder_current_mode(struct intel_encoder *encoder)
4799 {
4800         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4801         struct intel_crtc_state *crtc_state;
4802         struct drm_display_mode *mode;
4803         struct intel_crtc *crtc;
4804         enum pipe pipe;
4805
4806         if (!encoder->get_hw_state(encoder, &pipe))
4807                 return NULL;
4808
4809         crtc = intel_crtc_for_pipe(dev_priv, pipe);
4810
4811         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4812         if (!mode)
4813                 return NULL;
4814
4815         crtc_state = intel_crtc_state_alloc(crtc);
4816         if (!crtc_state) {
4817                 kfree(mode);
4818                 return NULL;
4819         }
4820
4821         if (!intel_crtc_get_pipe_config(crtc_state)) {
4822                 kfree(crtc_state);
4823                 kfree(mode);
4824                 return NULL;
4825         }
4826
4827         intel_encoder_get_config(encoder, crtc_state);
4828
4829         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4830
4831         kfree(crtc_state);
4832
4833         return mode;
4834 }
4835
4836 /**
4837  * intel_wm_need_update - Check whether watermarks need updating
4838  * @cur: current plane state
4839  * @new: new plane state
4840  *
4841  * Check current plane state versus the new one to determine whether
4842  * watermarks need to be recalculated.
4843  *
4844  * Returns true or false.
4845  */
4846 static bool intel_wm_need_update(const struct intel_plane_state *cur,
4847                                  struct intel_plane_state *new)
4848 {
4849         /* Update watermarks on tiling or size changes. */
4850         if (new->uapi.visible != cur->uapi.visible)
4851                 return true;
4852
4853         if (!cur->hw.fb || !new->hw.fb)
4854                 return false;
4855
4856         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
4857             cur->hw.rotation != new->hw.rotation ||
4858             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
4859             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
4860             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
4861             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
4862                 return true;
4863
4864         return false;
4865 }
4866
4867 static bool needs_scaling(const struct intel_plane_state *state)
4868 {
4869         int src_w = drm_rect_width(&state->uapi.src) >> 16;
4870         int src_h = drm_rect_height(&state->uapi.src) >> 16;
4871         int dst_w = drm_rect_width(&state->uapi.dst);
4872         int dst_h = drm_rect_height(&state->uapi.dst);
4873
4874         return (src_w != dst_w || src_h != dst_h);
4875 }
4876
4877 static bool intel_plane_do_async_flip(struct intel_plane *plane,
4878                                       const struct intel_crtc_state *old_crtc_state,
4879                                       const struct intel_crtc_state *new_crtc_state)
4880 {
4881         struct drm_i915_private *i915 = to_i915(plane->base.dev);
4882
4883         if (!plane->async_flip)
4884                 return false;
4885
4886         if (!new_crtc_state->uapi.async_flip)
4887                 return false;
4888
4889         /*
4890          * In platforms after DISPLAY13, we might need to override
4891          * first async flip in order to change watermark levels
4892          * as part of optimization.
4893          * So for those, we are checking if this is a first async flip.
4894          * For platforms earlier than DISPLAY13 we always do async flip.
4895          */
4896         return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
4897 }
4898
/*
 * Compute the per-plane derived state for an atomic update: scaler setup
 * (skl+), watermark update flags, CxSR/LP watermark disables, frontbuffer
 * bits, and whether to perform an async flip.
 *
 * Returns 0 on success or a negative error code (e.g. from scaler setup).
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *new_plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = new_crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* skl+: (re)compute the plane scaler state; cursor has no scaler. */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = new_plane_state->uapi.visible;

	/* A plane can't have been visible on an inactive crtc. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(new_crtc_state, new_plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing to do for this plane. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			new_crtc_state->update_wm_pre = true;
			new_crtc_state->update_wm_post = true;
		}
	}

	/* Track which planes' frontbuffers are affected by this update. */
	if (visible || was_visible)
		new_crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(new_plane_state))))
		new_crtc_state->disable_lp_wm = true;

	if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
		new_plane_state->do_async_flip = true;

	return 0;
}
5023
5024 static bool encoders_cloneable(const struct intel_encoder *a,
5025                                const struct intel_encoder *b)
5026 {
5027         /* masks could be asymmetric, so check both ways */
5028         return a == b || (a->cloneable & (1 << b->type) &&
5029                           b->cloneable & (1 << a->type));
5030 }
5031
5032 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
5033                                          struct intel_crtc *crtc,
5034                                          struct intel_encoder *encoder)
5035 {
5036         struct intel_encoder *source_encoder;
5037         struct drm_connector *connector;
5038         struct drm_connector_state *connector_state;
5039         int i;
5040
5041         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5042                 if (connector_state->crtc != &crtc->base)
5043                         continue;
5044
5045                 source_encoder =
5046                         to_intel_encoder(connector_state->best_encoder);
5047                 if (!encoders_cloneable(encoder, source_encoder))
5048                         return false;
5049         }
5050
5051         return true;
5052 }
5053
5054 static int icl_add_linked_planes(struct intel_atomic_state *state)
5055 {
5056         struct intel_plane *plane, *linked;
5057         struct intel_plane_state *plane_state, *linked_plane_state;
5058         int i;
5059
5060         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5061                 linked = plane_state->planar_linked_plane;
5062
5063                 if (!linked)
5064                         continue;
5065
5066                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
5067                 if (IS_ERR(linked_plane_state))
5068                         return PTR_ERR(linked_plane_state);
5069
5070                 drm_WARN_ON(state->base.dev,
5071                             linked_plane_state->planar_linked_plane != plane);
5072                 drm_WARN_ON(state->base.dev,
5073                             linked_plane_state->planar_slave == plane_state->planar_slave);
5074         }
5075
5076         return 0;
5077 }
5078
/*
 * (Re)compute the plane links needed for planar (NV12-style) YUV on
 * icl+: each plane in crtc_state->nv12_planes gets an otherwise unused
 * Y plane as its slave, and the relevant state is copied over to it.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or an
 * error from pulling a plane into the atomic state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane links only exist on icl+. */
	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar YUV planes on this crtc: nothing left to link. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a currently unused Y-capable plane on this crtc. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes additionally need the chroma upsampler Y-plane select. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
5177
5178 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
5179 {
5180         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5181         struct intel_atomic_state *state =
5182                 to_intel_atomic_state(new_crtc_state->uapi.state);
5183         const struct intel_crtc_state *old_crtc_state =
5184                 intel_atomic_get_old_crtc_state(state, crtc);
5185
5186         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
5187 }
5188
5189 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
5190 {
5191         const struct drm_display_mode *pipe_mode =
5192                 &crtc_state->hw.pipe_mode;
5193         int linetime_wm;
5194
5195         if (!crtc_state->hw.enable)
5196                 return 0;
5197
5198         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5199                                         pipe_mode->crtc_clock);
5200
5201         return min(linetime_wm, 0x1ff);
5202 }
5203
5204 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
5205                                const struct intel_cdclk_state *cdclk_state)
5206 {
5207         const struct drm_display_mode *pipe_mode =
5208                 &crtc_state->hw.pipe_mode;
5209         int linetime_wm;
5210
5211         if (!crtc_state->hw.enable)
5212                 return 0;
5213
5214         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5215                                         cdclk_state->logical.cdclk);
5216
5217         return min(linetime_wm, 0x1ff);
5218 }
5219
5220 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
5221 {
5222         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5223         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5224         const struct drm_display_mode *pipe_mode =
5225                 &crtc_state->hw.pipe_mode;
5226         int linetime_wm;
5227
5228         if (!crtc_state->hw.enable)
5229                 return 0;
5230
5231         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
5232                                    crtc_state->pixel_rate);
5233
5234         /* Display WA #1135: BXT:ALL GLK:ALL */
5235         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
5236             dev_priv->ipc_enabled)
5237                 linetime_wm /= 2;
5238
5239         return min(linetime_wm, 0x1ff);
5240 }
5241
5242 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
5243                                    struct intel_crtc *crtc)
5244 {
5245         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5246         struct intel_crtc_state *crtc_state =
5247                 intel_atomic_get_new_crtc_state(state, crtc);
5248         const struct intel_cdclk_state *cdclk_state;
5249
5250         if (DISPLAY_VER(dev_priv) >= 9)
5251                 crtc_state->linetime = skl_linetime_wm(crtc_state);
5252         else
5253                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
5254
5255         if (!hsw_crtc_supports_ips(crtc))
5256                 return 0;
5257
5258         cdclk_state = intel_atomic_get_cdclk_state(state);
5259         if (IS_ERR(cdclk_state))
5260                 return PTR_ERR(cdclk_state);
5261
5262         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
5263                                                        cdclk_state);
5264
5265         return 0;
5266 }
5267
/*
 * Per-crtc atomic check: compute clocks, color management, watermarks,
 * scalers, IPS and PSR2 selective-fetch state for the new crtc state.
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/* Disabling the crtc on old platforms needs a post-update wm pass. */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* Compute clocks only on a full modeset, and only if no shared DPLL
	 * has been assigned yet (it shouldn't be at this point). */
	if (mode_changed && crtc_state->hw.enable &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state.  We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	/* skl+: pipe scaler and scaler assignment. */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}
5353
/*
 * Resynchronize each connector's atomic state with the routing currently
 * recorded in the legacy encoder/crtc pointers, adjusting the connector
 * reference counts to match the new crtc bindings.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state = connector->base.state;
                struct intel_encoder *encoder =
                        to_intel_encoder(connector->base.encoder);

                /* Drop the reference held for the previous crtc binding. */
                if (conn_state->crtc)
                        drm_connector_put(&connector->base);

                if (encoder) {
                        struct intel_crtc *crtc =
                                to_intel_crtc(encoder->base.crtc);
                        const struct intel_crtc_state *crtc_state =
                                to_intel_crtc_state(crtc->base.state);

                        conn_state->best_encoder = &encoder->base;
                        conn_state->crtc = &crtc->base;
                        /* Derive max_bpc from the pipe bpp; fall back to 24 bpp (8 bpc). */
                        conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

                        /* The new crtc binding holds a connector reference. */
                        drm_connector_get(&connector->base);
                } else {
                        conn_state->best_encoder = NULL;
                        conn_state->crtc = NULL;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
}
5386
5387 static int
5388 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
5389                       struct intel_crtc_state *pipe_config)
5390 {
5391         struct drm_connector *connector = conn_state->connector;
5392         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5393         const struct drm_display_info *info = &connector->display_info;
5394         int bpp;
5395
5396         switch (conn_state->max_bpc) {
5397         case 6 ... 7:
5398                 bpp = 6 * 3;
5399                 break;
5400         case 8 ... 9:
5401                 bpp = 8 * 3;
5402                 break;
5403         case 10 ... 11:
5404                 bpp = 10 * 3;
5405                 break;
5406         case 12 ... 16:
5407                 bpp = 12 * 3;
5408                 break;
5409         default:
5410                 MISSING_CASE(conn_state->max_bpc);
5411                 return -EINVAL;
5412         }
5413
5414         if (bpp < pipe_config->pipe_bpp) {
5415                 drm_dbg_kms(&i915->drm,
5416                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
5417                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
5418                             connector->base.id, connector->name,
5419                             bpp, 3 * info->bpc,
5420                             3 * conn_state->max_requested_bpc,
5421                             pipe_config->pipe_bpp);
5422
5423                 pipe_config->pipe_bpp = bpp;
5424         }
5425
5426         return 0;
5427 }
5428
5429 static int
5430 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5431                           struct intel_crtc_state *pipe_config)
5432 {
5433         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5434         struct drm_atomic_state *state = pipe_config->uapi.state;
5435         struct drm_connector *connector;
5436         struct drm_connector_state *connector_state;
5437         int bpp, i;
5438
5439         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5440             IS_CHERRYVIEW(dev_priv)))
5441                 bpp = 10*3;
5442         else if (DISPLAY_VER(dev_priv) >= 5)
5443                 bpp = 12*3;
5444         else
5445                 bpp = 8*3;
5446
5447         pipe_config->pipe_bpp = bpp;
5448
5449         /* Clamp display bpp to connector max bpp */
5450         for_each_new_connector_in_state(state, connector, connector_state, i) {
5451                 int ret;
5452
5453                 if (connector_state->crtc != &crtc->base)
5454                         continue;
5455
5456                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
5457                 if (ret)
5458                         return ret;
5459         }
5460
5461         return 0;
5462 }
5463
/* Dump the crtc_* (hardware) timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
                                    const struct drm_display_mode *mode)
{
        drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
                    "type: 0x%x flags: 0x%x\n",
                    mode->crtc_clock,
                    mode->crtc_hdisplay, mode->crtc_hsync_start,
                    mode->crtc_hsync_end, mode->crtc_htotal,
                    mode->crtc_vdisplay, mode->crtc_vsync_start,
                    mode->crtc_vsync_end, mode->crtc_vtotal,
                    mode->type, mode->flags);
}
5476
/*
 * Dump a link M/N configuration (labelled by @id, e.g. "fdi" or
 * "dp m_n") to the KMS debug log.
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
                      const char *id, unsigned int lane_count,
                      const struct intel_link_m_n *m_n)
{
        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

        drm_dbg_kms(&i915->drm,
                    "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                    id, lane_count,
                    m_n->data_m, m_n->data_n,
                    m_n->link_m, m_n->link_n, m_n->tu);
}
5490
5491 static void
5492 intel_dump_infoframe(struct drm_i915_private *dev_priv,
5493                      const union hdmi_infoframe *frame)
5494 {
5495         if (!drm_debug_enabled(DRM_UT_KMS))
5496                 return;
5497
5498         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
5499 }
5500
5501 static void
5502 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
5503                       const struct drm_dp_vsc_sdp *vsc)
5504 {
5505         if (!drm_debug_enabled(DRM_UT_KMS))
5506                 return;
5507
5508         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
5509 }
5510
/* Build a name table entry indexed by the INTEL_OUTPUT_* enum value. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human readable names for the bits of crtc_state->output_types. */
static const char * const output_type_str[] = {
        OUTPUT_TYPE(UNUSED),
        OUTPUT_TYPE(ANALOG),
        OUTPUT_TYPE(DVO),
        OUTPUT_TYPE(SDVO),
        OUTPUT_TYPE(LVDS),
        OUTPUT_TYPE(TVOUT),
        OUTPUT_TYPE(HDMI),
        OUTPUT_TYPE(DP),
        OUTPUT_TYPE(EDP),
        OUTPUT_TYPE(DSI),
        OUTPUT_TYPE(DDI),
        OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
5529
/*
 * Render the output_types bitmask into @buf as a comma separated list
 * of names. Stops early if @buf runs out of space; any bits left
 * unprinted (or bits with no name table entry) trip the WARN below.
 */
static void snprintf_output_types(char *buf, size_t len,
                                  unsigned int output_types)
{
        char *str = buf;
        int i;

        str[0] = '\0';

        for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
                int r;

                if ((output_types & BIT(i)) == 0)
                        continue;

                /* Comma-separate all entries after the first. */
                r = snprintf(str, len, "%s%s",
                             str != buf ? "," : "", output_type_str[i]);
                /* snprintf returns the would-be length; >= len means truncation. */
                if (r >= len)
                        break;
                str += r;
                len -= r;

                output_types &= ~BIT(i);
        }

        WARN_ON_ONCE(output_types != 0);
}
5556
/* Human readable names for enum intel_output_format values. */
static const char * const output_format_str[] = {
        [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
        [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
        [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
5562
5563 static const char *output_formats(enum intel_output_format format)
5564 {
5565         if (format >= ARRAY_SIZE(output_format_str))
5566                 return "invalid";
5567         return output_format_str[format];
5568 }
5569
/* Dump a plane's framebuffer, rotation/scaler and src/dst rects to the KMS debug log. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *i915 = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;

        /* No fb bound: nothing more than visibility to report. */
        if (!fb) {
                drm_dbg_kms(&i915->drm,
                            "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
                            plane->base.base.id, plane->base.name,
                            yesno(plane_state->uapi.visible));
                return;
        }

        drm_dbg_kms(&i915->drm,
                    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
                    plane->base.base.id, plane->base.name,
                    fb->base.id, fb->width, fb->height, &fb->format->format,
                    fb->modifier, yesno(plane_state->uapi.visible));
        drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
                    plane_state->hw.rotation, plane_state->scaler_id);
        /* src is in 16.16 fixed point, dst in integer pixels. */
        if (plane_state->uapi.visible)
                drm_dbg_kms(&i915->drm,
                            "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
                            DRM_RECT_FP_ARG(&plane_state->uapi.src),
                            DRM_RECT_ARG(&plane_state->uapi.dst));
}
5597
/*
 * Dump the full crtc state to the KMS debug log: output routing,
 * link M/N values, infoframes, modes/timings, pfit, DPLL and color
 * management state, plus the state of every plane on this pipe.
 * @context tags the dump (e.g. which phase of the modeset produced it).
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
                                   struct intel_atomic_state *state,
                                   const char *context)
{
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_plane_state *plane_state;
        struct intel_plane *plane;
        char buf[64];
        int i;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
                    crtc->base.base.id, crtc->base.name,
                    yesno(pipe_config->hw.enable), context);

        /* Disabled crtc: the rest of the state is meaningless, only dump planes. */
        if (!pipe_config->hw.enable)
                goto dump_planes;

        snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
        drm_dbg_kms(&dev_priv->drm,
                    "active: %s, output_types: %s (0x%x), output format: %s\n",
                    yesno(pipe_config->hw.active),
                    buf, pipe_config->output_types,
                    output_formats(pipe_config->output_format));

        drm_dbg_kms(&dev_priv->drm,
                    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
                    transcoder_name(pipe_config->cpu_transcoder),
                    pipe_config->pipe_bpp, pipe_config->dither);

        drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
                    transcoder_name(pipe_config->mst_master_transcoder));

        drm_dbg_kms(&dev_priv->drm,
                    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
                    transcoder_name(pipe_config->master_transcoder),
                    pipe_config->sync_mode_slaves_mask);

        drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
                    pipe_config->bigjoiner_slave ? "slave" :
                    pipe_config->bigjoiner ? "master" : "no");

        drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
                    enableddisabled(pipe_config->splitter.enable),
                    pipe_config->splitter.link_count,
                    pipe_config->splitter.pixel_overlap);

        /* FDI link M/N only exists when driving a PCH encoder. */
        if (pipe_config->has_pch_encoder)
                intel_dump_m_n_config(pipe_config, "fdi",
                                      pipe_config->fdi_lanes,
                                      &pipe_config->fdi_m_n);

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                intel_dump_m_n_config(pipe_config, "dp m_n",
                                pipe_config->lane_count, &pipe_config->dp_m_n);
                /* Second set of M/N values is used for DRRS downclocking. */
                if (pipe_config->has_drrs)
                        intel_dump_m_n_config(pipe_config, "dp m2_n2",
                                              pipe_config->lane_count,
                                              &pipe_config->dp_m2_n2);
        }

        drm_dbg_kms(&dev_priv->drm,
                    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
                    pipe_config->has_audio, pipe_config->has_infoframe,
                    pipe_config->infoframes.enable);

        /* Dump each infoframe/SDP type that is enabled for transmission. */
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
                drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
                            pipe_config->infoframes.gcp);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
        /* GMP packets carry the same DRM (HDR) infoframe payload. */
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(DP_SDP_VSC))
                intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

        drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
                    yesno(pipe_config->vrr.enable),
                    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
                    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
                    pipe_config->vrr.flipline,
                    intel_vrr_vmin_vblank_start(pipe_config),
                    intel_vrr_vmax_vblank_start(pipe_config));

        drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.mode);
        drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
        intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
        drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
        intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
        drm_dbg_kms(&dev_priv->drm,
                    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
                    pipe_config->port_clock,
                    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
                    pipe_config->pixel_rate);

        drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
                    pipe_config->linetime, pipe_config->ips_linetime);

        /* Pipe scalers only exist on skl+. */
        if (DISPLAY_VER(dev_priv) >= 9)
                drm_dbg_kms(&dev_priv->drm,
                            "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
                            crtc->num_scalers,
                            pipe_config->scaler_state.scaler_users,
                            pipe_config->scaler_state.scaler_id);

        /* GMCH platforms use the gmch pfit, everything else the pch pfit. */
        if (HAS_GMCH(dev_priv))
                drm_dbg_kms(&dev_priv->drm,
                            "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                            pipe_config->gmch_pfit.control,
                            pipe_config->gmch_pfit.pgm_ratios,
                            pipe_config->gmch_pfit.lvds_border_bits);
        else
                drm_dbg_kms(&dev_priv->drm,
                            "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
                            DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
                            enableddisabled(pipe_config->pch_pfit.enabled),
                            yesno(pipe_config->pch_pfit.force_thru));

        drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
                    pipe_config->ips_enabled, pipe_config->double_wide);

        intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

        /* CHV has its own CGM color pipeline register layout. */
        if (IS_CHERRYVIEW(dev_priv))
                drm_dbg_kms(&dev_priv->drm,
                            "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
                            pipe_config->cgm_mode, pipe_config->gamma_mode,
                            pipe_config->gamma_enable, pipe_config->csc_enable);
        else
                drm_dbg_kms(&dev_priv->drm,
                            "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
                            pipe_config->csc_mode, pipe_config->gamma_mode,
                            pipe_config->gamma_enable, pipe_config->csc_enable);

        drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
                    pipe_config->hw.degamma_lut ?
                    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
                    pipe_config->hw.gamma_lut ?
                    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
        /* Plane states are only available when called with an atomic state. */
        if (!state)
                return;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe == crtc->pipe)
                        intel_dump_plane_state(plane_state);
        }
}
5763
/*
 * Verify that no digital port is claimed by more than one encoder, and
 * that MST and SST/HDMI usage is not mixed on the same port. Returns
 * true if the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
        struct drm_device *dev = state->base.dev;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        unsigned int used_ports = 0;
        unsigned int used_mst_ports = 0;
        bool ret = true;

        /*
         * We're going to peek into connector->state,
         * hence connection_mutex must be held.
         */
        drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

        /*
         * Walk the connector list instead of the encoder
         * list to detect the problem on ddi platforms
         * where there's just one encoder per digital port.
         */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *connector_state;
                struct intel_encoder *encoder;

                /* Prefer the new state from this commit, else the current state. */
                connector_state =
                        drm_atomic_get_new_connector_state(&state->base,
                                                           connector);
                if (!connector_state)
                        connector_state = connector->state;

                if (!connector_state->best_encoder)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);

                drm_WARN_ON(dev, !connector_state->crtc);

                switch (encoder->type) {
                case INTEL_OUTPUT_DDI:
                        if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
                                break;
                        fallthrough;
                case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_HDMI:
                case INTEL_OUTPUT_EDP:
                        /* the same port mustn't appear more than once */
                        if (used_ports & BIT(encoder->port))
                                ret = false;

                        used_ports |= BIT(encoder->port);
                        break;
                case INTEL_OUTPUT_DP_MST:
                        /* An MST port may legitimately carry several streams. */
                        used_mst_ports |=
                                1 << encoder->port;
                        break;
                default:
                        break;
                }
        }
        drm_connector_list_iter_end(&conn_iter);

        /* can't mix MST and SST/HDMI on the same port */
        if (used_ports & used_mst_ports)
                return false;

        return ret;
}
5832
/*
 * Refresh the parts of the hw state that may change without a full
 * modeset: copy the color blobs from the (possibly bigjoiner master)
 * crtc's new state, if it is part of this atomic state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
                                           struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
        const struct intel_crtc_state *master_crtc_state =
                intel_atomic_get_new_crtc_state(state, master_crtc);

        /* Nothing to do when the master state isn't in this commit. */
        if (master_crtc_state)
                intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
}
5847
5848 static void
5849 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
5850                                  struct intel_crtc_state *crtc_state)
5851 {
5852         crtc_state->hw.enable = crtc_state->uapi.enable;
5853         crtc_state->hw.active = crtc_state->uapi.active;
5854         crtc_state->hw.mode = crtc_state->uapi.mode;
5855         crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
5856         crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5857
5858         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
5859 }
5860
/*
 * Copy the hw crtc state back into the uapi state so userspace sees
 * the actual configuration. Bigjoiner slaves are skipped: their uapi
 * state must keep reflecting what userspace requested.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
        if (crtc_state->bigjoiner_slave)
                return;

        crtc_state->uapi.enable = crtc_state->hw.enable;
        crtc_state->uapi.active = crtc_state->hw.active;
        /* Only fails on allocation failure of the mode blob. */
        drm_WARN_ON(crtc_state->uapi.crtc->dev,
                    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

        crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
        crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

        /* copy color blobs to uapi */
        drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
                                  crtc_state->hw.degamma_lut);
        drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
                                  crtc_state->hw.gamma_lut);
        drm_property_replace_blob(&crtc_state->uapi.ctm,
                                  crtc_state->hw.ctm);
}
5882
/*
 * Copy the bigjoiner master's state (@from_crtc_state) into the slave's
 * @crtc_state, preserving the slave's own uapi/dpll/scaler bookkeeping,
 * then fix up the fields that differ between master and slave.
 * Returns 0 on success or -ENOMEM.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
                          const struct intel_crtc_state *from_crtc_state)
{
        struct intel_crtc_state *saved_state;

        saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
        if (!saved_state)
                return -ENOMEM;

        /* Preserve the slave's own per-crtc bookkeeping in the copy. */
        saved_state->uapi = crtc_state->uapi;
        saved_state->scaler_state = crtc_state->scaler_state;
        saved_state->shared_dpll = crtc_state->shared_dpll;
        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
        saved_state->crc_enabled = crtc_state->crc_enabled;

        /* Drop the slave's old refcounted hw blobs before overwriting. */
        intel_crtc_free_hw_state(crtc_state);
        memcpy(crtc_state, saved_state, sizeof(*crtc_state));
        kfree(saved_state);

        /* Re-init hw state */
        memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
        crtc_state->hw.enable = from_crtc_state->hw.enable;
        crtc_state->hw.active = from_crtc_state->hw.active;
        crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
        crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

        /* Some fixups */
        crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
        crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
        crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
        crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
        crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
        crtc_state->bigjoiner_slave = true;
        crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
        crtc_state->has_audio = from_crtc_state->has_audio;

        return 0;
}
5922
/*
 * Reset @crtc_state to a freshly-allocated default state while
 * preserving the fields that must survive across compute_config
 * (uapi state, dpll/scaler bookkeeping, etc.), then re-derive the
 * hw state from the uapi state. Returns 0 on success or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
                                 struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *saved_state;

        saved_state = intel_crtc_state_alloc(crtc);
        if (!saved_state)
                return -ENOMEM;

        /* free the old crtc_state->hw members */
        intel_crtc_free_hw_state(crtc_state);

        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
         * fixed, so that the crtc_state can be safely duplicated. For now,
         * only fields that are know to not cause problems are preserved. */

        saved_state->uapi = crtc_state->uapi;
        saved_state->scaler_state = crtc_state->scaler_state;
        saved_state->shared_dpll = crtc_state->shared_dpll;
        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
        memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
               sizeof(saved_state->icl_port_dplls));
        saved_state->crc_enabled = crtc_state->crc_enabled;
        /* Watermarks are computed pre-commit on these platforms; keep them. */
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                saved_state->wm = crtc_state->wm;

        memcpy(crtc_state, saved_state, sizeof(*crtc_state));
        kfree(saved_state);

        intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

        return 0;
}
5961
5962 static int
5963 intel_modeset_pipe_config(struct intel_atomic_state *state,
5964                           struct intel_crtc_state *pipe_config)
5965 {
5966         struct drm_crtc *crtc = pipe_config->uapi.crtc;
5967         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5968         struct drm_connector *connector;
5969         struct drm_connector_state *connector_state;
5970         int base_bpp, ret, i;
5971         bool retry = true;
5972
5973         pipe_config->cpu_transcoder =
5974                 (enum transcoder) to_intel_crtc(crtc)->pipe;
5975
5976         /*
5977          * Sanitize sync polarity flags based on requested ones. If neither
5978          * positive or negative polarity is requested, treat this as meaning
5979          * negative polarity.
5980          */
5981         if (!(pipe_config->hw.adjusted_mode.flags &
5982               (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
5983                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
5984
5985         if (!(pipe_config->hw.adjusted_mode.flags &
5986               (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
5987                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
5988
5989         ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
5990                                         pipe_config);
5991         if (ret)
5992                 return ret;
5993
5994         base_bpp = pipe_config->pipe_bpp;
5995
5996         /*
5997          * Determine the real pipe dimensions. Note that stereo modes can
5998          * increase the actual pipe size due to the frame doubling and
5999          * insertion of additional space for blanks between the frame. This
6000          * is stored in the crtc timings. We use the requested mode to do this
6001          * computation to clearly distinguish it from the adjusted mode, which
6002          * can be changed by the connectors in the below retry loop.
6003          */
6004         drm_mode_get_hv_timing(&pipe_config->hw.mode,
6005                                &pipe_config->pipe_src_w,
6006                                &pipe_config->pipe_src_h);
6007
6008         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6009                 struct intel_encoder *encoder =
6010                         to_intel_encoder(connector_state->best_encoder);
6011
6012                 if (connector_state->crtc != crtc)
6013                         continue;
6014
6015                 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
6016                         drm_dbg_kms(&i915->drm,
6017                                     "rejecting invalid cloning configuration\n");
6018                         return -EINVAL;
6019                 }
6020
6021                 /*
6022                  * Determine output_types before calling the .compute_config()
6023                  * hooks so that the hooks can use this information safely.
6024                  */
6025                 if (encoder->compute_output_type)
6026                         pipe_config->output_types |=
6027                                 BIT(encoder->compute_output_type(encoder, pipe_config,
6028                                                                  connector_state));
6029                 else
6030                         pipe_config->output_types |= BIT(encoder->type);
6031         }
6032
6033 encoder_retry:
6034         /* Ensure the port clock defaults are reset when retrying. */
6035         pipe_config->port_clock = 0;
6036         pipe_config->pixel_multiplier = 1;
6037
6038         /* Fill in default crtc timings, allow encoders to overwrite them. */
6039         drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
6040                               CRTC_STEREO_DOUBLE);
6041
6042         /* Pass our mode to the connectors and the CRTC to give them a chance to
6043          * adjust it according to limitations or connector properties, and also
6044          * a chance to reject the mode entirely.
6045          */
6046         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6047                 struct intel_encoder *encoder =
6048                         to_intel_encoder(connector_state->best_encoder);
6049
6050                 if (connector_state->crtc != crtc)
6051                         continue;
6052
6053                 ret = encoder->compute_config(encoder, pipe_config,
6054                                               connector_state);
6055                 if (ret == -EDEADLK)
6056                         return ret;
6057                 if (ret < 0) {
6058                         drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
6059                         return ret;
6060                 }
6061         }
6062
6063         /* Set default port clock if not overwritten by the encoder. Needs to be
6064          * done afterwards in case the encoder adjusts the mode. */
6065         if (!pipe_config->port_clock)
6066                 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
6067                         * pipe_config->pixel_multiplier;
6068
6069         ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
6070         if (ret == -EDEADLK)
6071                 return ret;
6072         if (ret == -EAGAIN) {
6073                 if (drm_WARN(&i915->drm, !retry,
6074                              "loop in pipe configuration computation\n"))
6075                         return -EINVAL;
6076
6077                 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
6078                 retry = false;
6079                 goto encoder_retry;
6080         }
6081         if (ret < 0) {
6082                 drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
6083                 return ret;
6084         }
6085
6086         /* Dithering seems to not pass-through bits correctly when it should, so
6087          * only enable it on 6bpc panels and when its not a compliance
6088          * test requesting 6bpc video pattern.
6089          */
6090         pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
6091                 !pipe_config->dither_force_disable;
6092         drm_dbg_kms(&i915->drm,
6093                     "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
6094                     base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
6095
6096         return 0;
6097 }
6098
6099 static int
6100 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
6101 {
6102         struct intel_atomic_state *state =
6103                 to_intel_atomic_state(crtc_state->uapi.state);
6104         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6105         struct drm_connector_state *conn_state;
6106         struct drm_connector *connector;
6107         int i;
6108
6109         for_each_new_connector_in_state(&state->base, connector,
6110                                         conn_state, i) {
6111                 struct intel_encoder *encoder =
6112                         to_intel_encoder(conn_state->best_encoder);
6113                 int ret;
6114
6115                 if (conn_state->crtc != &crtc->base ||
6116                     !encoder->compute_config_late)
6117                         continue;
6118
6119                 ret = encoder->compute_config_late(encoder, crtc_state,
6120                                                    conn_state);
6121                 if (ret)
6122                         return ret;
6123         }
6124
6125         return 0;
6126 }
6127
/*
 * Fuzzy comparison of two clock values (in kHz).
 *
 * Two non-zero clocks are considered equal when their absolute
 * difference is below ~5% of their sum (integer arithmetic, so the
 * threshold is applied after truncating division). Two identical
 * values (including 0/0) always match; a zero vs. non-zero pair
 * never does.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* Equivalent to 100 + 100 * delta / sum < 105, i.e. delta/sum < 5%. */
	return (delta + sum) * 100 / sum < 105;
}
6145
6146 static bool
6147 intel_compare_m_n(unsigned int m, unsigned int n,
6148                   unsigned int m2, unsigned int n2,
6149                   bool exact)
6150 {
6151         if (m == m2 && n == n2)
6152                 return true;
6153
6154         if (exact || !m || !n || !m2 || !n2)
6155                 return false;
6156
6157         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
6158
6159         if (n > n2) {
6160                 while (n > n2) {
6161                         m2 <<= 1;
6162                         n2 <<= 1;
6163                 }
6164         } else if (n < n2) {
6165                 while (n < n2) {
6166                         m <<= 1;
6167                         n <<= 1;
6168                 }
6169         }
6170
6171         if (n != n2)
6172                 return false;
6173
6174         return intel_fuzzy_clock_check(m, m2);
6175 }
6176
6177 static bool
6178 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
6179                        const struct intel_link_m_n *m2_n2,
6180                        bool exact)
6181 {
6182         return m_n->tu == m2_n2->tu &&
6183                 intel_compare_m_n(m_n->data_m, m_n->data_n,
6184                                   m2_n2->data_m, m2_n2->data_n, exact) &&
6185                 intel_compare_m_n(m_n->link_m, m_n->link_n,
6186                                   m2_n2->link_m, m2_n2->link_n, exact);
6187 }
6188
6189 static bool
6190 intel_compare_infoframe(const union hdmi_infoframe *a,
6191                         const union hdmi_infoframe *b)
6192 {
6193         return memcmp(a, b, sizeof(*a)) == 0;
6194 }
6195
6196 static bool
6197 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
6198                          const struct drm_dp_vsc_sdp *b)
6199 {
6200         return memcmp(a, b, sizeof(*a)) == 0;
6201 }
6202
6203 static void
6204 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
6205                                bool fastset, const char *name,
6206                                const union hdmi_infoframe *a,
6207                                const union hdmi_infoframe *b)
6208 {
6209         if (fastset) {
6210                 if (!drm_debug_enabled(DRM_UT_KMS))
6211                         return;
6212
6213                 drm_dbg_kms(&dev_priv->drm,
6214                             "fastset mismatch in %s infoframe\n", name);
6215                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6216                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
6217                 drm_dbg_kms(&dev_priv->drm, "found:\n");
6218                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
6219         } else {
6220                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
6221                 drm_err(&dev_priv->drm, "expected:\n");
6222                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
6223                 drm_err(&dev_priv->drm, "found:\n");
6224                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
6225         }
6226 }
6227
6228 static void
6229 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
6230                                 bool fastset, const char *name,
6231                                 const struct drm_dp_vsc_sdp *a,
6232                                 const struct drm_dp_vsc_sdp *b)
6233 {
6234         if (fastset) {
6235                 if (!drm_debug_enabled(DRM_UT_KMS))
6236                         return;
6237
6238                 drm_dbg_kms(&dev_priv->drm,
6239                             "fastset mismatch in %s dp sdp\n", name);
6240                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6241                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
6242                 drm_dbg_kms(&dev_priv->drm, "found:\n");
6243                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
6244         } else {
6245                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
6246                 drm_err(&dev_priv->drm, "expected:\n");
6247                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
6248                 drm_err(&dev_priv->drm, "found:\n");
6249                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
6250         }
6251 }
6252
/*
 * Log a mismatch in crtc state field @name, with a printf-style
 * detail message. A fastset mismatch only means a full modeset is
 * needed and is logged at debug level; otherwise the mismatch
 * indicates a verification failure and is logged as an error.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	/*
	 * %pV defers formatting of @format/@args to the printk core;
	 * vaf must stay set up between va_start() and va_end().
	 */
	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
6275
6276 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
6277 {
6278         if (dev_priv->params.fastboot != -1)
6279                 return dev_priv->params.fastboot;
6280
6281         /* Enable fastboot by default on Skylake and newer */
6282         if (DISPLAY_VER(dev_priv) >= 9)
6283                 return true;
6284
6285         /* Enable fastboot by default on VLV and CHV */
6286         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6287                 return true;
6288
6289         /* Disabled by default on all others */
6290         return false;
6291 }
6292
/*
 * Compare two crtc states field by field and log every mismatch.
 *
 * @current_config is the state the hardware is in (or was read out
 * as), @pipe_config is the newly computed software state. With
 * @fastset set, mismatches are reported at debug level only (they
 * just mean the crtc needs a full modeset); without it, any mismatch
 * is a state-verification error.
 *
 * Returns true iff every checked field matched.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	/* Fastsetting on top of a state inherited from the BIOS/GOP. */
	bool fixup_inherited = fastset &&
		current_config->inherited && !pipe_config->inherited;

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm,
			    "initial modeset and fastboot not set\n");
		ret = false;
	}

/* Exact compare, reported in hex. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Exact compare of only the bits covered by @mask, reported in hex. */
#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Exact compare, reported in decimal. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Exact compare of a bool, reported as yes/no. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Exact compare of a pointer (identity, not contents). */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Link M/N compare; fuzzy when fastsetting, exact otherwise. */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i data %i/%i link %i/%i, " \
				     "or tu %i data %i/%i link %i/%i, " \
				     "found tu %i, data %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.data_m, \
				     current_config->name.data_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.data_m, \
				     current_config->alt_name.data_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.data_m, \
				     pipe_config->name.data_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the flag bits selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Clock compare allowing the usual ~5% fuzz. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Bitwise compare of a full infoframe, dumping both on mismatch. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* DP VSC SDP compare; skipped entirely when PSR is in use on either side. */
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!current_config->has_psr && !pipe_config->has_psr && \
	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* Compare a LUT mode (@name1) and, when it matches, the LUT contents (@name2). */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

/* True when either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	/* PCH/FDI link state. */
	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (DISPLAY_VER(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* Pipe timings (pipe_mode, then adjusted_mode). */
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	/* Audio readout is incomplete, hence the special handling. */
	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (DISPLAY_VER(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	/* These fields are allowed to change across a fastset. */
	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);

		if (current_config->active_planes) {
			PIPE_CONF_CHECK_BOOL(has_psr);
			PIPE_CONF_CHECK_BOOL(has_psr2);
			PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
			PIPE_CONF_CHECK_I(dc3co_exitline);
		}
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	/* Shared DPLL state, only on platforms with a DPLL manager. */
	if (dev_priv->dpll.mgr) {
		PIPE_CONF_CHECK_P(shared_dpll);

		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
	}

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	/* With PSR the VSC SDP enable bit is owned by the PSR code. */
	if (current_config->has_psr || pipe_config->has_psr)
		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
	else
		PIPE_CONF_CHECK_X(infoframes.enable);

	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_BOOL(bigjoiner);
	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);

	PIPE_CONF_CHECK_I(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.dsc_split);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp);

	PIPE_CONF_CHECK_BOOL(splitter.enable);
	PIPE_CONF_CHECK_I(splitter.link_count);
	PIPE_CONF_CHECK_I(splitter.pixel_overlap);

	PIPE_CONF_CHECK_I(mst_master_transcoder);

	PIPE_CONF_CHECK_BOOL(vrr.enable);
	PIPE_CONF_CHECK_I(vrr.vmin);
	PIPE_CONF_CHECK_I(vrr.vmax);
	PIPE_CONF_CHECK_I(vrr.flipline);
	PIPE_CONF_CHECK_I(vrr.pipeline_full);
	PIPE_CONF_CHECK_I(vrr.guardband);

/*
 * NOTE(review): PIPE_CONF_CHECK_{X_WITH_MASK,M_N,M_N_ALT,INFOFRAME,
 * DP_VSC_SDP} are not #undef'ed here, so they remain defined for the
 * rest of this translation unit — confirm that is intentional.
 */
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
6719
/*
 * Cross-check the dotclock implied by the programmed FDI M/N values
 * against the dotclock the encoder put into the adjusted mode.
 * Only relevant for pipes driven through a PCH encoder (i.e. over FDI).
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees. Fuzzy comparison since
		 * the two values come from different computations.
		 */
		drm_WARN(&dev_priv->drm,
			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
			 fdi_dotclock, dotclock);
	}
}
6738
/*
 * Compare the SKL+ watermark and DDB allocation state computed in
 * software against what is actually programmed in the hardware,
 * logging an error for every mismatch. No-op on pre-SKL hardware
 * (display version < 9) and for inactive crtcs.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Heap-allocated scratch buffer holding the hw readout. */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;	/* best effort only; silently skip verification on OOM */

	/* Read back the programmed watermarks and DDB allocations. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* Display ver 11+: the enabled DBUF slice mask must match sw tracking. */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks: one check per wm level */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* Transition watermark */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV WM0 — only on platforms with dedicated SAGV watermarks */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV transition watermark */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB (only the Y/primary allocation is compared here) */
		hw_ddb_entry = &hw->ddb_y[plane->id];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
6858
/*
 * For every connector in @state whose new state points at @crtc,
 * verify the connector state against the crtc state and check that
 * the legacy connector->encoder pointer agrees with the atomic
 * best_encoder.
 *
 * @crtc may be NULL, in which case connectors with no crtc are
 * checked (NOTE(review): relies on &crtc->base evaluating to NULL
 * when crtc is NULL, i.e. base being the first member — confirm).
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
6883
/*
 * Verify every encoder's software state against the connector states
 * in @state: an encoder must be enabled iff some committed connector
 * uses it, and an encoder with no crtc must read back as disabled
 * from the hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/*
		 * Scan the connectors in the state: "found" means the
		 * encoder was or will be in use, "enabled" means some
		 * connector will use it after the commit.
		 */
		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoder untouched by this commit: nothing to verify. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* A crtc-less encoder must also be off in the hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
6932
/*
 * Read the full pipe config back from the hardware and compare it
 * against the software state just committed, complaining loudly on
 * any mismatch. The old crtc state (no longer needed at this point)
 * is reset and reused as scratch storage for the hardware readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master_crtc;

	/* Recycle the old state as a clean buffer for the hw readout. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Encoders hang off the master crtc (relevant for bigjoiner). */
	master_crtc = intel_master_crtc(new_crtc_state);

	for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let the encoder fill in its part of the hw readout. */
		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Full (non-fastset) comparison of sw state vs. hw readout. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
7003
7004 static void
7005 intel_verify_planes(struct intel_atomic_state *state)
7006 {
7007         struct intel_plane *plane;
7008         const struct intel_plane_state *plane_state;
7009         int i;
7010
7011         for_each_new_intel_plane_in_state(state, plane,
7012                                           plane_state, i)
7013                 assert_plane(plane, plane_state->planar_slave ||
7014                              plane_state->uapi.visible);
7015 }
7016
/*
 * Verify one shared DPLL's software tracking against the hardware.
 * With a non-NULL @crtc, additionally check that the crtc's pipe is
 * present in (or absent from) the pll's active/reference masks as
 * dictated by @new_crtc_state; with a NULL @crtc only the pll's
 * global bookkeeping is checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls don't track an on/off state to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Active users must be a subset of the reference holders. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* Compare full register state, but only while the pll is on. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
7071
/*
 * Verify the shared DPLL bookkeeping for @crtc after a commit: check
 * the new pll (if any) in full, and if the crtc switched plls, make
 * sure it no longer appears in the old pll's masks.
 */
static void
verify_shared_dpll_state(struct intel_crtc *crtc,
			 struct intel_crtc_state *old_crtc_state,
			 struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);

	/* Crtc moved off its old pll: it must have been unreferenced. */
	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
7095
/*
 * DG2 only: read the MPLLB registers back through the encoder and
 * compare them field by field against the computed software state.
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
		   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_mpllb_state mpllb_hw_state = { 0 };
	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_encoder *encoder;

	if (!IS_DG2(i915))
		return;

	if (!new_crtc_state->hw.active)
		return;

	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* Log a mismatch for one MPLLB register field (sw vs. hw). */
#define MPLLB_CHECK(name) do { \
	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     mpllb_sw_state->name, \
				     mpllb_hw_state.name); \
	} \
} while (0)

	MPLLB_CHECK(mpllb_cp);
	MPLLB_CHECK(mpllb_div);
	MPLLB_CHECK(mpllb_div2);
	MPLLB_CHECK(mpllb_fracn1);
	MPLLB_CHECK(mpllb_fracn2);
	MPLLB_CHECK(mpllb_sscen);
	MPLLB_CHECK(mpllb_sscstep);

	/*
	 * ref_control is handled by the hardware/firemware and never
	 * programmed by the software, but the proper values are supplied
	 * in the bspec for verification purposes.
	 */
	MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
7141
/*
 * Run the full post-commit state verification for one crtc: watermarks,
 * connectors, pipe config, shared DPLLs and (DG2) MPLLB. Only done for
 * crtcs that underwent a full modeset or a fastset (update_pipe).
 */
static void
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
		return;

	verify_wm_state(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
	verify_mpllb_state(state, new_crtc_state);
}
7157
7158 static void
7159 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
7160 {
7161         int i;
7162
7163         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
7164                 verify_single_dpll_state(dev_priv,
7165                                          &dev_priv->dpll.shared_dplls[i],
7166                                          NULL, NULL);
7167 }
7168
/*
 * Verify the parts of the state not tied to a specific crtc:
 * encoders, connectors that end up with no crtc, and the shared
 * DPLLs' global bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
7177
/*
 * intel_modeset_all_pipes - force a full modeset on every active pipe
 * @state: the atomic state to populate
 *
 * Adds all crtcs to @state and flags each active one (that isn't
 * already undergoing a modeset) for a full modeset, pulling in its
 * affected connectors and planes as well.
 *
 * Returns 0 on success, a negative error code on failure.
 */
int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Skip inactive pipes and ones already doing a modeset. */
		if (!crtc_state->hw.active ||
		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		/* All currently active planes will be re-programmed. */
		crtc_state->update_planes |= crtc_state->active_planes;
	}

	return 0;
}
7215
/*
 * Update the crtc's vblank timestamping constants, mode flags and
 * scanline counter offset for the newly committed timings. With VRR
 * enabled, the vblank timings are first stretched to the vmin/vmax
 * window before the constants are derived.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Local copy so the VRR adjustments don't touch the crtc state. */
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (DISPLAY_VER(dev_priv) == 2) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
7277
7278 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
7279 {
7280         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7281         struct intel_crtc_state *new_crtc_state;
7282         struct intel_crtc *crtc;
7283         int i;
7284
7285         if (!dev_priv->dpll_funcs)
7286                 return;
7287
7288         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7289                 if (!intel_crtc_needs_modeset(new_crtc_state))
7290                         continue;
7291
7292                 intel_release_shared_dplls(state, crtc);
7293         }
7294 }
7295
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Pulls every crtc into the state (may return an error). */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * The newly enabled pipe(s) must wait on whichever pipe was
	 * already running, or failing that, the second pipe waits on
	 * the first one being enabled.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
7356
7357 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
7358                            u8 active_pipes)
7359 {
7360         const struct intel_crtc_state *crtc_state;
7361         struct intel_crtc *crtc;
7362         int i;
7363
7364         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7365                 if (crtc_state->hw.active)
7366                         active_pipes |= BIT(crtc->pipe);
7367                 else
7368                         active_pipes &= ~BIT(crtc->pipe);
7369         }
7370
7371         return active_pipes;
7372 }
7373
7374 static int intel_modeset_checks(struct intel_atomic_state *state)
7375 {
7376         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7377
7378         state->modeset = true;
7379
7380         if (IS_HASWELL(dev_priv))
7381                 return hsw_mode_set_planes_workaround(state);
7382
7383         return 0;
7384 }
7385
7386 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
7387                                      struct intel_crtc_state *new_crtc_state)
7388 {
7389         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
7390                 return;
7391
7392         new_crtc_state->uapi.mode_changed = false;
7393         new_crtc_state->update_pipe = true;
7394 }
7395
7396 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
7397                                     struct intel_crtc_state *new_crtc_state)
7398 {
7399         /*
7400          * If we're not doing the full modeset we want to
7401          * keep the current M/N values as they may be
7402          * sufficiently different to the computed values
7403          * to cause problems.
7404          *
7405          * FIXME: should really copy more fuzzy state here
7406          */
7407         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
7408         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
7409         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
7410         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
7411 }
7412
7413 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
7414                                           struct intel_crtc *crtc,
7415                                           u8 plane_ids_mask)
7416 {
7417         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7418         struct intel_plane *plane;
7419
7420         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7421                 struct intel_plane_state *plane_state;
7422
7423                 if ((plane_ids_mask & BIT(plane->id)) == 0)
7424                         continue;
7425
7426                 plane_state = intel_atomic_get_plane_state(state, plane);
7427                 if (IS_ERR(plane_state))
7428                         return PTR_ERR(plane_state);
7429         }
7430
7431         return 0;
7432 }
7433
7434 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
7435                                      struct intel_crtc *crtc)
7436 {
7437         const struct intel_crtc_state *old_crtc_state =
7438                 intel_atomic_get_old_crtc_state(state, crtc);
7439         const struct intel_crtc_state *new_crtc_state =
7440                 intel_atomic_get_new_crtc_state(state, crtc);
7441
7442         return intel_crtc_add_planes_to_state(state, crtc,
7443                                               old_crtc_state->enabled_planes |
7444                                               new_crtc_state->enabled_planes);
7445 }
7446
7447 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7448 {
7449         /* See {hsw,vlv,ivb}_plane_ratio() */
7450         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7451                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7452                 IS_IVYBRIDGE(dev_priv);
7453 }
7454
7455 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
7456                                            struct intel_crtc *crtc,
7457                                            struct intel_crtc *other)
7458 {
7459         const struct intel_plane_state *plane_state;
7460         struct intel_plane *plane;
7461         u8 plane_ids = 0;
7462         int i;
7463
7464         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7465                 if (plane->pipe == crtc->pipe)
7466                         plane_ids |= BIT(plane->id);
7467         }
7468
7469         return intel_crtc_add_planes_to_state(state, other, plane_ids);
7470 }
7471
/*
 * For each crtc in the state that uses bigjoiner, add the corresponding
 * planes on the linked crtc to the state as well.
 */
static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
{
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
						      crtc_state->bigjoiner_linked_crtc);
		if (ret)
			return ret;
	}

	return 0;
}
7492
/*
 * Plane-level checks for the atomic state: pull in implicitly affected
 * planes (linked NV12 Y planes, bigjoiner siblings), run the per-plane
 * driver checks, and on platforms where the active plane count affects
 * the minimum cdclk make sure all such planes are part of the state
 * before cdclk is computed.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	/* Run the per-plane driver check on every plane in the state. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane is excluded from the active plane count. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only re-add the planes when the count actually changes. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
7549
/* Run the crtc-level driver check on every crtc in the atomic state. */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
		int ret;

		ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			drm_dbg_atomic(&i915->drm,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}
7571
/*
 * Check whether any enabled crtc in the state whose CPU transcoder is
 * in the @transcoders bitmask needs a full modeset.
 */
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    intel_crtc_needs_modeset(new_crtc_state))
			return true;
	}

	return false;
}
7588
/*
 * Validate the bigjoiner configuration of @crtc: if it was previously a
 * bigjoiner slave, verify the master has released it (by doing a modeset);
 * if it now wants bigjoiner, claim the adjacent crtc (CRTC + 1) as slave
 * and copy the master's state onto it.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave_crtc, *master_crtc;

	/* slave being enabled, is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave_crtc = crtc;
		master_crtc = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
		/* Master not in the state or not modesetting -> still claimed. */
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
	if (!slave_crtc) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Big joiner configuration requires "
			    "CRTC + 1 to be used, doesn't exist\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
	master_crtc = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] Used as slave for big joiner\n",
		    slave_crtc->base.base.id, slave_crtc->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		    "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		    slave_crtc->base.base.id, slave_crtc->base.name,
		    master_crtc->base.base.id, master_crtc->base.name);
	return -EINVAL;
}
7643
/*
 * Tear down the bigjoiner link on both the master and its slave crtc,
 * then rebuild the slave's hw state from its own uapi state so it can
 * operate as a normal crtc again.
 */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc_state *master_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);

	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
}
7655
7656 /**
7657  * DOC: asynchronous flip implementation
7658  *
7659  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7660  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7661  * Correspondingly, support is currently added for primary plane only.
7662  *
7663  * Async flip can only change the plane surface address, so anything else
7664  * changing is rejected from the intel_atomic_check_async() function.
7665  * Once this check is cleared, flip done interrupt is enabled using
7666  * the intel_crtc_enable_flip_done() function.
7667  *
7668  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
7670  * handler itself. The timestamp and sequence sent during the flip done event
7671  * correspond to the last vblank and have no relation to the actual time when
7672  * the flip done event was sent.
7673  */
/*
 * Validate an async flip request on @crtc: only the plane surface
 * address may change, so reject modesets, inactive crtcs, and any
 * change to plane count, format, modifier, stride, geometry, blending,
 * color properties, or decryption state.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
		return -EINVAL;
	}

	if (!new_crtc_state->hw.active) {
		drm_dbg_kms(&i915->drm, "CRTC inactive\n");
		return -EINVAL;
	}
	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
		drm_dbg_kms(&i915->drm,
			    "Active planes cannot be changed during async flip\n");
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (new_plane_state->hw.fb->format->num_planes > 1) {
			drm_dbg_kms(&i915->drm,
				    "Planar formats not supported with async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].mapping_stride !=
		    new_plane_state->view.color_plane[0].mapping_stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}

		/* plane decryption is allowed to change only in synchronous flips */
		if (old_plane_state->decrypt != new_plane_state->decrypt)
			return -EINVAL;
	}

	return 0;
}
7800
/*
 * Pull the linked crtc of every bigjoiner pair into the atomic state,
 * propagate modesets across the link (including affected connectors and
 * planes), and finally tear down the old bigjoiner links so they can be
 * re-established during the bigjoiner check if still needed.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* A modeset on either half of the pair forces one on the other. */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
7844
7845 /**
7846  * intel_atomic_check - validate state object
7847  * @dev: drm device
7848  * @_state: state to validate
7849  */
7850 static int intel_atomic_check(struct drm_device *dev,
7851                               struct drm_atomic_state *_state)
7852 {
7853         struct drm_i915_private *dev_priv = to_i915(dev);
7854         struct intel_atomic_state *state = to_intel_atomic_state(_state);
7855         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7856         struct intel_crtc *crtc;
7857         int ret, i;
7858         bool any_ms = false;
7859
7860         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7861                                             new_crtc_state, i) {
7862                 if (new_crtc_state->inherited != old_crtc_state->inherited)
7863                         new_crtc_state->uapi.mode_changed = true;
7864         }
7865
7866         intel_vrr_check_modeset(state);
7867
7868         ret = drm_atomic_helper_check_modeset(dev, &state->base);
7869         if (ret)
7870                 goto fail;
7871
7872         ret = intel_bigjoiner_add_affected_crtcs(state);
7873         if (ret)
7874                 goto fail;
7875
7876         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7877                                             new_crtc_state, i) {
7878                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7879                         /* Light copy */
7880                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
7881
7882                         continue;
7883                 }
7884
7885                 if (!new_crtc_state->uapi.enable) {
7886                         if (!new_crtc_state->bigjoiner_slave) {
7887                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
7888                                 any_ms = true;
7889                         }
7890                         continue;
7891                 }
7892
7893                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
7894                 if (ret)
7895                         goto fail;
7896
7897                 ret = intel_modeset_pipe_config(state, new_crtc_state);
7898                 if (ret)
7899                         goto fail;
7900
7901                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
7902                                                    new_crtc_state);
7903                 if (ret)
7904                         goto fail;
7905         }
7906
7907         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7908                                             new_crtc_state, i) {
7909                 if (!intel_crtc_needs_modeset(new_crtc_state))
7910                         continue;
7911
7912                 ret = intel_modeset_pipe_config_late(new_crtc_state);
7913                 if (ret)
7914                         goto fail;
7915
7916                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
7917         }
7918
7919         /**
7920          * Check if fastset is allowed by external dependencies like other
7921          * pipes and transcoders.
7922          *
7923          * Right now it only forces a fullmodeset when the MST master
7924          * transcoder did not changed but the pipe of the master transcoder
7925          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
7926          * in case of port synced crtcs, if one of the synced crtcs
7927          * needs a full modeset, all other synced crtcs should be
7928          * forced a full modeset.
7929          */
7930         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7931                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7932                         continue;
7933
7934                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7935                         enum transcoder master = new_crtc_state->mst_master_transcoder;
7936
7937                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7938                                 new_crtc_state->uapi.mode_changed = true;
7939                                 new_crtc_state->update_pipe = false;
7940                         }
7941                 }
7942
7943                 if (is_trans_port_sync_mode(new_crtc_state)) {
7944                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
7945
7946                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7947                                 trans |= BIT(new_crtc_state->master_transcoder);
7948
7949                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
7950                                 new_crtc_state->uapi.mode_changed = true;
7951                                 new_crtc_state->update_pipe = false;
7952                         }
7953                 }
7954
7955                 if (new_crtc_state->bigjoiner) {
7956                         struct intel_crtc_state *linked_crtc_state =
7957                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
7958
7959                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
7960                                 new_crtc_state->uapi.mode_changed = true;
7961                                 new_crtc_state->update_pipe = false;
7962                         }
7963                 }
7964         }
7965
7966         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7967                                             new_crtc_state, i) {
7968                 if (intel_crtc_needs_modeset(new_crtc_state)) {
7969                         any_ms = true;
7970                         continue;
7971                 }
7972
7973                 if (!new_crtc_state->update_pipe)
7974                         continue;
7975
7976                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
7977         }
7978
7979         if (any_ms && !check_digital_port_conflicts(state)) {
7980                 drm_dbg_kms(&dev_priv->drm,
7981                             "rejecting conflicting digital port configuration\n");
7982                 ret = -EINVAL;
7983                 goto fail;
7984         }
7985
7986         ret = drm_dp_mst_atomic_check(&state->base);
7987         if (ret)
7988                 goto fail;
7989
7990         ret = intel_atomic_check_planes(state);
7991         if (ret)
7992                 goto fail;
7993
7994         ret = intel_compute_global_watermarks(state);
7995         if (ret)
7996                 goto fail;
7997
7998         ret = intel_bw_atomic_check(state);
7999         if (ret)
8000                 goto fail;
8001
8002         ret = intel_cdclk_atomic_check(state, &any_ms);
8003         if (ret)
8004                 goto fail;
8005
8006         if (intel_any_crtc_needs_modeset(state))
8007                 any_ms = true;
8008
8009         if (any_ms) {
8010                 ret = intel_modeset_checks(state);
8011                 if (ret)
8012                         goto fail;
8013
8014                 ret = intel_modeset_calc_cdclk(state);
8015                 if (ret)
8016                         return ret;
8017
8018                 intel_modeset_clear_plls(state);
8019         }
8020
8021         ret = intel_atomic_check_crtcs(state);
8022         if (ret)
8023                 goto fail;
8024
8025         ret = intel_fbc_atomic_check(state);
8026         if (ret)
8027                 goto fail;
8028
8029         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8030                                             new_crtc_state, i) {
8031                 if (new_crtc_state->uapi.async_flip) {
8032                         ret = intel_atomic_check_async(state, crtc);
8033                         if (ret)
8034                                 goto fail;
8035                 }
8036
8037                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
8038                     !new_crtc_state->update_pipe)
8039                         continue;
8040
8041                 intel_dump_pipe_config(new_crtc_state, state,
8042                                        intel_crtc_needs_modeset(new_crtc_state) ?
8043                                        "[modeset]" : "[fastset]");
8044         }
8045
8046         return 0;
8047
8048  fail:
8049         if (ret == -EDEADLK)
8050                 return ret;
8051
8052         /*
8053          * FIXME would probably be nice to know which crtc specifically
8054          * caused the failure, in cases where we can pinpoint it.
8055          */
8056         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8057                                             new_crtc_state, i)
8058                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
8059
8060         return ret;
8061 }
8062
8063 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
8064 {
8065         struct intel_crtc_state *crtc_state;
8066         struct intel_crtc *crtc;
8067         int i, ret;
8068
8069         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
8070         if (ret < 0)
8071                 return ret;
8072
8073         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8074                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8075
8076                 if (mode_changed || crtc_state->update_pipe ||
8077                     crtc_state->uapi.color_mgmt_changed) {
8078                         intel_dsb_prepare(crtc_state);
8079                 }
8080         }
8081
8082         return 0;
8083 }
8084
8085 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
8086                                   struct intel_crtc_state *crtc_state)
8087 {
8088         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8089
8090         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
8091                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8092
8093         if (crtc_state->has_pch_encoder) {
8094                 enum pipe pch_transcoder =
8095                         intel_crtc_pch_transcoder(crtc);
8096
8097                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
8098         }
8099 }
8100
/*
 * Program the pipe-level state that a fastset (non-modeset commit) must
 * refresh: pipe source size, panel fitter, linetime watermark, and the
 * pipe chicken register.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);
}
8143
/*
 * Pipe-level programming that must happen before the plane updates for
 * @crtc: color management, pipemisc, fastset state, PSR2 manual
 * tracking, and watermarks.
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}
8174
8175 static void commit_pipe_post_planes(struct intel_atomic_state *state,
8176                                     struct intel_crtc *crtc)
8177 {
8178         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8179         const struct intel_crtc_state *new_crtc_state =
8180                 intel_atomic_get_new_crtc_state(state, crtc);
8181
8182         /*
8183          * Disable the scaler(s) after the plane(s) so that we don't
8184          * get a catastrophic underrun even if the two operations
8185          * end up happening in two different frames.
8186          */
8187         if (DISPLAY_VER(dev_priv) >= 9 &&
8188             !intel_crtc_needs_modeset(new_crtc_state))
8189                 skl_detach_scalers(new_crtc_state);
8190 }
8191
8192 static void intel_enable_crtc(struct intel_atomic_state *state,
8193                               struct intel_crtc *crtc)
8194 {
8195         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8196         const struct intel_crtc_state *new_crtc_state =
8197                 intel_atomic_get_new_crtc_state(state, crtc);
8198
8199         if (!intel_crtc_needs_modeset(new_crtc_state))
8200                 return;
8201
8202         intel_crtc_update_active_timings(new_crtc_state);
8203
8204         dev_priv->display->crtc_enable(state, crtc);
8205
8206         if (new_crtc_state->bigjoiner_slave)
8207                 return;
8208
8209         /* vblanks work again, re-enable pipe CRC. */
8210         intel_crtc_enable_pipe_crc(crtc);
8211 }
8212
/*
 * Update an already running pipe: pre-plane fastset work, plane
 * programming armed inside the intel_pipe_update_start()/end()
 * vblank evasion critical section, and post-plane cleanup.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	intel_fbc_update(state, crtc);

	intel_update_planes_on_crtc(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	/* Arm the plane updates; the hw generation selects the implementation. */
	if (DISPLAY_VER(dev_priv) >= 9)
		skl_arm_planes_on_crtc(state, crtc);
	else
		i9xx_arm_planes_on_crtc(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
8263
/*
 * Disable a crtc as part of a full modeset, along with the state
 * tied to it (pipe CRC, FBC, shared DPLL).
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv))
		intel_initial_watermarks(state, crtc);
}
8287
/*
 * Disable all pipes undergoing a full modeset, in three passes:
 * first the planes of every such pipe, then the pipes with ordering
 * constraints (port sync slaves, MST slaves, bigjoiner slaves), and
 * finally everything else that is left.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0; /* bitmask of pipes disabled by the slave pass */
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_pre_plane_update(state, crtc);
		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !old_crtc_state->bigjoiner_slave)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}
8345
8346 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8347 {
8348         struct intel_crtc_state *new_crtc_state;
8349         struct intel_crtc *crtc;
8350         int i;
8351
8352         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8353                 if (!new_crtc_state->hw.active)
8354                         continue;
8355
8356                 intel_enable_crtc(state, crtc);
8357                 intel_update_crtc(state, crtc);
8358         }
8359 }
8360
/*
 * SKL+ modeset enable path. Beyond enabling the pipes this must
 * sequence the updates so that the DDB allocations of any two pipes
 * never overlap at any point in time, waiting for vblanks in between
 * where necessary.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Classify pipes and seed 'entries' with the current DDB layout. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer this pipe until its new DDB no longer overlaps. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	/* From here on update_pipes tracks modeset pipes awaiting their plane update. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		/* Modeset pipes must not overlap any still-recorded DDB entry. */
		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* All pipes should have been handled by now. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
8482
8483 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8484 {
8485         struct intel_atomic_state *state, *next;
8486         struct llist_node *freed;
8487
8488         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8489         llist_for_each_entry_safe(state, next, freed, freed)
8490                 drm_atomic_state_put(&state->base);
8491 }
8492
8493 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8494 {
8495         struct drm_i915_private *dev_priv =
8496                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8497
8498         intel_atomic_helper_free_state(dev_priv);
8499 }
8500
/*
 * Wait until either the commit_ready fence has signaled or a
 * modeset-affecting GPU reset (I915_RESET_MODESET) has begun,
 * whichever comes first.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Register on both waitqueues before testing either condition. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
8527
/*
 * Release the DSB of every crtc in the commit. Note that
 * intel_atomic_commit_tail() moved the dsb pointer from the new to
 * the old crtc state before this runs, hence the cleanup is done on
 * the old states.
 */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}
8538
8539 static void intel_atomic_cleanup_work(struct work_struct *work)
8540 {
8541         struct intel_atomic_state *state =
8542                 container_of(work, struct intel_atomic_state, base.commit_work);
8543         struct drm_i915_private *i915 = to_i915(state->base.dev);
8544
8545         intel_cleanup_dsbs(state);
8546         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
8547         drm_atomic_helper_commit_cleanup_done(&state->base);
8548         drm_atomic_state_put(&state->base);
8549
8550         intel_atomic_helper_free_state(i915);
8551 }
8552
/*
 * Cache the fast clear color value of each plane that has a CCS
 * clear color plane into plane_state->ccval, reading it back from
 * the framebuffer object.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		/* Skip planes whose fb has no clear color plane. */
		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the four 4-byte per-channel values (see layout above). */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
8593
/*
 * Second half of an atomic commit: performs all the hw programming
 * for the commit in the required order, once the commit_ready fence
 * has signaled (or a GPU reset ended the wait). Runs either inline
 * for blocking commits or from a worker for nonblocking ones (see
 * intel_atomic_commit()).
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	/* Grab the power domains each modified crtc needs; released at the end. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	/* Arm flip done events for async flips before the planes are armed. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display->commit_modeset_enables(state);

	intel_encoders_update_complete(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_disable_flip_done(state, crtc);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);
	intel_psr_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
8764
8765 static void intel_atomic_commit_work(struct work_struct *work)
8766 {
8767         struct intel_atomic_state *state =
8768                 container_of(work, struct intel_atomic_state, base.commit_work);
8769
8770         intel_atomic_commit_tail(state);
8771 }
8772
/*
 * i915_sw_fence notify callback for the commit_ready fence.
 *
 * Nothing to do on FENCE_COMPLETE since the waiting happens in
 * intel_atomic_commit_fence_wait(); on FENCE_FREE the state is queued
 * onto the atomic_helper free list to be released from a worker.
 */
static int
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			/* Only schedule the worker when the list was empty. */
			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}
8797
/*
 * Move each plane's frontbuffer tracking bit from its old fb to its
 * new fb for all planes in the commit.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}
8810
/*
 * Main atomic commit entry point (drm_mode_config_funcs.atomic_commit).
 * Prepares the commit, swaps in the new state, then either runs the
 * commit tail inline (blocking) or queues it on the appropriate
 * workqueue (nonblocking). Returns 0 on success or a negative errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Held until intel_atomic_commit_tail() releases it. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		/* Setup/swap failed: unwind the fence, DSBs, planes and wakeref. */
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Reference dropped by intel_atomic_cleanup_work(). */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking modesets wait out any pending nonblocking ones first. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
8901
8902 /**
8903  * intel_plane_destroy - destroy a plane
8904  * @plane: plane to destroy
8905  *
8906  * Common destruction function for all types of planes (primary, cursor,
8907  * sprite).
8908  */
8909 void intel_plane_destroy(struct drm_plane *plane)
8910 {
8911         drm_plane_cleanup(plane);
8912         kfree(to_intel_plane(plane));
8913 }
8914
8915 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
8916 {
8917         struct intel_plane *plane;
8918
8919         for_each_intel_plane(&dev_priv->drm, plane) {
8920                 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
8921                                                               plane->pipe);
8922
8923                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
8924         }
8925 }
8926
8927
8928 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
8929                                       struct drm_file *file)
8930 {
8931         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8932         struct drm_crtc *drmmode_crtc;
8933         struct intel_crtc *crtc;
8934
8935         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
8936         if (!drmmode_crtc)
8937                 return -ENOENT;
8938
8939         crtc = to_intel_crtc(drmmode_crtc);
8940         pipe_from_crtc_id->pipe = crtc->pipe;
8941
8942         return 0;
8943 }
8944
8945 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8946 {
8947         struct drm_device *dev = encoder->base.dev;
8948         struct intel_encoder *source_encoder;
8949         u32 possible_clones = 0;
8950
8951         for_each_intel_encoder(dev, source_encoder) {
8952                 if (encoders_cloneable(encoder, source_encoder))
8953                         possible_clones |= drm_encoder_mask(&source_encoder->base);
8954         }
8955
8956         return possible_clones;
8957 }
8958
8959 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
8960 {
8961         struct drm_device *dev = encoder->base.dev;
8962         struct intel_crtc *crtc;
8963         u32 possible_crtcs = 0;
8964
8965         for_each_intel_crtc(dev, crtc) {
8966                 if (encoder->pipe_mask & BIT(crtc->pipe))
8967                         possible_crtcs |= drm_crtc_mask(&crtc->base);
8968         }
8969
8970         return possible_crtcs;
8971 }
8972
8973 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
8974 {
8975         if (!IS_MOBILE(dev_priv))
8976                 return false;
8977
8978         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
8979                 return false;
8980
8981         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
8982                 return false;
8983
8984         return true;
8985 }
8986
8987 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
8988 {
8989         if (DISPLAY_VER(dev_priv) >= 9)
8990                 return false;
8991
8992         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
8993                 return false;
8994
8995         if (HAS_PCH_LPT_H(dev_priv) &&
8996             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
8997                 return false;
8998
8999         /* DDI E can't be used if DDI A requires 4 lanes */
9000         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
9001                 return false;
9002
9003         if (!dev_priv->vbt.int_crt_support)
9004                 return false;
9005
9006         return true;
9007 }
9008
/*
 * Probe the platform's display outputs and register an encoder for each
 * one found (DDI, DP, HDMI, SDVO, LVDS, CRT, DSI, TV, DVO), then fill in
 * each encoder's possible_crtcs/possible_clones masks. The registration
 * order within each branch is deliberate (see the LVDS-vs-eDP comment
 * below) -- do not reorder casually.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Newest platforms first; each branch lists the ports it exposes. */
	if (IS_DG2(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D_XELPD);
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		icl_dsi_init(dev_priv);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		icl_dsi_init(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		vlv_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
	} else if (HAS_DDI(dev_priv)) {
		u32 found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/* Haswell uses DDI functions to detect digital outputs. */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		if (found)
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B/C/D/F presence is latched in the SFUSE straps. */
		found = intel_de_read(dev_priv, SFUSE_STRAP);
		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D strap is only meaningful when D isn't eDP. */
		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* All encoders now exist; compute their CRTC/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
9248
/*
 * Validate @mode against device-wide limits (transcoder timing limits,
 * unsupported mode flags). The order of the checks determines which
 * MODE_* status is reported when several limits are violated at once.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	/* Composite sync variants are not supported. */
	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size and blanking requirements per generation. */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
9347
9348 enum drm_mode_status
9349 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9350                                 const struct drm_display_mode *mode,
9351                                 bool bigjoiner)
9352 {
9353         int plane_width_max, plane_height_max;
9354
9355         /*
9356          * intel_mode_valid() should be
9357          * sufficient on older platforms.
9358          */
9359         if (DISPLAY_VER(dev_priv) < 9)
9360                 return MODE_OK;
9361
9362         /*
9363          * Most people will probably want a fullscreen
9364          * plane so let's not advertize modes that are
9365          * too big for that.
9366          */
9367         if (DISPLAY_VER(dev_priv) >= 11) {
9368                 plane_width_max = 5120 << bigjoiner;
9369                 plane_height_max = 4320;
9370         } else {
9371                 plane_width_max = 5120;
9372                 plane_height_max = 4096;
9373         }
9374
9375         if (mode->hdisplay > plane_width_max)
9376                 return MODE_H_ILLEGAL;
9377
9378         if (mode->vdisplay > plane_height_max)
9379                 return MODE_V_ILLEGAL;
9380
9381         return MODE_OK;
9382 }
9383
/* Device-level drm_mode_config vfuncs: fb creation and atomic entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
9395
/* Display vfuncs used when DISPLAY_VER >= 9 (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};
9403
/* Display vfuncs for pre-gen9 HAS_DDI platforms (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9411
/* Display vfuncs for HAS_PCH_SPLIT platforms (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9419
/* Display vfuncs for VLV/CHV (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9427
/* Fallback display vfuncs for all remaining platforms (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9435
9436 /**
9437  * intel_init_display_hooks - initialize the display modesetting hooks
9438  * @dev_priv: device private
9439  */
9440 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
9441 {
9442         if (!HAS_DISPLAY(dev_priv))
9443                 return;
9444
9445         intel_init_cdclk_hooks(dev_priv);
9446         intel_audio_hooks_init(dev_priv);
9447
9448         intel_dpll_init_clock_hook(dev_priv);
9449
9450         if (DISPLAY_VER(dev_priv) >= 9) {
9451                 dev_priv->display = &skl_display_funcs;
9452         } else if (HAS_DDI(dev_priv)) {
9453                 dev_priv->display = &ddi_display_funcs;
9454         } else if (HAS_PCH_SPLIT(dev_priv)) {
9455                 dev_priv->display = &pch_split_display_funcs;
9456         } else if (IS_CHERRYVIEW(dev_priv) ||
9457                    IS_VALLEYVIEW(dev_priv)) {
9458                 dev_priv->display = &vlv_display_funcs;
9459         } else {
9460                 dev_priv->display = &i9xx_display_funcs;
9461         }
9462
9463         intel_fdi_init_hook(dev_priv);
9464 }
9465
9466 void intel_modeset_init_hw(struct drm_i915_private *i915)
9467 {
9468         struct intel_cdclk_state *cdclk_state;
9469
9470         if (!HAS_DISPLAY(i915))
9471                 return;
9472
9473         cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
9474
9475         intel_update_cdclk(i915);
9476         intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
9477         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
9478 }
9479
9480 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
9481 {
9482         struct drm_plane *plane;
9483         struct intel_crtc *crtc;
9484
9485         for_each_intel_crtc(state->dev, crtc) {
9486                 struct intel_crtc_state *crtc_state;
9487
9488                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
9489                 if (IS_ERR(crtc_state))
9490                         return PTR_ERR(crtc_state);
9491
9492                 if (crtc_state->hw.active) {
9493                         /*
9494                          * Preserve the inherited flag to avoid
9495                          * taking the full modeset path.
9496                          */
9497                         crtc_state->inherited = true;
9498                 }
9499         }
9500
9501         drm_for_each_plane(plane, state->dev) {
9502                 struct drm_plane_state *plane_state;
9503
9504                 plane_state = drm_atomic_get_plane_state(state, plane);
9505                 if (IS_ERR(plane_state))
9506                         return PTR_ERR(plane_state);
9507         }
9508
9509         return 0;
9510 }
9511
9512 /*
9513  * Calculate what we think the watermarks should be for the state we've read
9514  * out of the hardware and then immediately program those watermarks so that
9515  * we ensure the hardware settings match our internal state.
9516  *
9517  * We can calculate what we think WM's should be by creating a duplicate of the
9518  * current state (which was constructed during hardware readout) and running it
9519  * through the atomic check code to calculate new watermark values in the
9520  * state object.
9521  */
9522 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
9523 {
9524         struct drm_atomic_state *state;
9525         struct intel_atomic_state *intel_state;
9526         struct intel_crtc *crtc;
9527         struct intel_crtc_state *crtc_state;
9528         struct drm_modeset_acquire_ctx ctx;
9529         int ret;
9530         int i;
9531
9532         /* Only supported on platforms that use atomic watermark design */
9533         if (!dev_priv->wm_disp->optimize_watermarks)
9534                 return;
9535
9536         state = drm_atomic_state_alloc(&dev_priv->drm);
9537         if (drm_WARN_ON(&dev_priv->drm, !state))
9538                 return;
9539
9540         intel_state = to_intel_atomic_state(state);
9541
9542         drm_modeset_acquire_init(&ctx, 0);
9543
9544 retry:
9545         state->acquire_ctx = &ctx;
9546
9547         /*
9548          * Hardware readout is the only time we don't want to calculate
9549          * intermediate watermarks (since we don't trust the current
9550          * watermarks).
9551          */
9552         if (!HAS_GMCH(dev_priv))
9553                 intel_state->skip_intermediate_wm = true;
9554
9555         ret = sanitize_watermarks_add_affected(state);
9556         if (ret)
9557                 goto fail;
9558
9559         ret = intel_atomic_check(&dev_priv->drm, state);
9560         if (ret)
9561                 goto fail;
9562
9563         /* Write calculated watermark values back */
9564         for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
9565                 crtc_state->wm.need_postvbl_update = true;
9566                 intel_optimize_watermarks(intel_state, crtc);
9567
9568                 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
9569         }
9570
9571 fail:
9572         if (ret == -EDEADLK) {
9573                 drm_atomic_state_clear(state);
9574                 drm_modeset_backoff(&ctx);
9575                 goto retry;
9576         }
9577
9578         /*
9579          * If we fail here, it means that the hardware appears to be
9580          * programmed in a way that shouldn't be possible, given our
9581          * understanding of watermark requirements.  This might mean a
9582          * mistake in the hardware readout code or a mistake in the
9583          * watermark calculations for a given platform.  Raise a WARN
9584          * so that this is noticeable.
9585          *
9586          * If this actually happens, we'll have to just leave the
9587          * BIOS-programmed watermarks untouched and hope for the best.
9588          */
9589         drm_WARN(&dev_priv->drm, ret,
9590                  "Could not determine valid watermarks for inherited state\n");
9591
9592         drm_atomic_state_put(state);
9593
9594         drm_modeset_drop_locks(&ctx);
9595         drm_modeset_acquire_fini(&ctx);
9596 }
9597
/*
 * Commit the state inherited from the BIOS back to the hardware once,
 * so that the software state and the hardware agree before the first
 * real commit from userspace. Returns 0 or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	/* Jumped back to from the out path on -EDEADLK lock contention. */
retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Encoders that can't fastset from the inherited state
			 * pull their connectors into the state for a full check.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Back off and retry when another context holds a lock we need. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
9676
9677 static void intel_mode_config_init(struct drm_i915_private *i915)
9678 {
9679         struct drm_mode_config *mode_config = &i915->drm.mode_config;
9680
9681         drm_mode_config_init(&i915->drm);
9682         INIT_LIST_HEAD(&i915->global_obj_list);
9683
9684         mode_config->min_width = 0;
9685         mode_config->min_height = 0;
9686
9687         mode_config->preferred_depth = 24;
9688         mode_config->prefer_shadow = 1;
9689
9690         mode_config->funcs = &intel_mode_funcs;
9691
9692         mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
9693
9694         /*
9695          * Maximum framebuffer dimensions, chosen to match
9696          * the maximum render engine surface size on gen4+.
9697          */
9698         if (DISPLAY_VER(i915) >= 7) {
9699                 mode_config->max_width = 16384;
9700                 mode_config->max_height = 16384;
9701         } else if (DISPLAY_VER(i915) >= 4) {
9702                 mode_config->max_width = 8192;
9703                 mode_config->max_height = 8192;
9704         } else if (DISPLAY_VER(i915) == 3) {
9705                 mode_config->max_width = 4096;
9706                 mode_config->max_height = 4096;
9707         } else {
9708                 mode_config->max_width = 2048;
9709                 mode_config->max_height = 2048;
9710         }
9711
9712         if (IS_I845G(i915) || IS_I865G(i915)) {
9713                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
9714                 mode_config->cursor_height = 1023;
9715         } else if (IS_I830(i915) || IS_I85X(i915) ||
9716                    IS_I915G(i915) || IS_I915GM(i915)) {
9717                 mode_config->cursor_width = 64;
9718                 mode_config->cursor_height = 64;
9719         } else {
9720                 mode_config->cursor_width = 256;
9721                 mode_config->cursor_height = 256;
9722         }
9723 }
9724
/*
 * Counterpart to intel_mode_config_init(): tear down the global atomic
 * objects and the drm mode config.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
        intel_atomic_global_obj_cleanup(i915);
        drm_mode_config_cleanup(&i915->drm);
}
9730
/*
 * Display init part #1: call before irq install.
 *
 * Sets up everything that must not depend on interrupts: vblank
 * support, VBT parsing, VGA registration, power domains, DMC firmware,
 * the modeset/flip workqueues and the global atomic objects
 * (cdclk, dbuf, bw).
 *
 * Returns 0 on success; on failure everything initialized so far is
 * torn down via the goto-cleanup chain.
 */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
        int ret;

        if (i915_inject_probe_failure(i915))
                return -ENODEV;

        if (HAS_DISPLAY(i915)) {
                ret = drm_vblank_init(&i915->drm,
                                      INTEL_NUM_PIPES(i915));
                if (ret)
                        return ret;
        }

        intel_bios_init(i915);

        ret = intel_vga_register(i915);
        if (ret)
                goto cleanup_bios;

        /* FIXME: completely on the wrong abstraction layer */
        intel_power_domains_init_hw(i915, false);

        /* Nothing further to set up on display-less hardware. */
        if (!HAS_DISPLAY(i915))
                return 0;

        intel_dmc_ucode_init(i915);

        /* Ordered wq for modesets, high-prio unbound wq for page flips. */
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

        i915->framestart_delay = 1; /* 1-4 */

        i915->window2_delay = 0; /* No DSB so no window2 delay */

        intel_mode_config_init(i915);

        ret = intel_cdclk_init(i915);
        if (ret)
                goto cleanup_vga_client_pw_domain_dmc;

        ret = intel_dbuf_init(i915);
        if (ret)
                goto cleanup_vga_client_pw_domain_dmc;

        ret = intel_bw_init(i915);
        if (ret)
                goto cleanup_vga_client_pw_domain_dmc;

        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);

        intel_init_quirks(i915);

        intel_fbc_init(i915);

        return 0;

cleanup_vga_client_pw_domain_dmc:
        intel_dmc_ucode_fini(i915);
        intel_power_domains_driver_remove(i915);
        intel_vga_unregister(i915);
cleanup_bios:
        intel_bios_driver_remove(i915);

        return ret;
}
9801
/*
 * Display init part #2: call after irq install, but before gem init.
 *
 * Creates the crtcs and outputs, reads out the hardware state left by
 * the BIOS, reconstructs the initial plane configs, and sanitizes the
 * watermarks. Returns 0 on success or a negative error code.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
        struct intel_crtc *crtc;
        int ret;

        if (!HAS_DISPLAY(i915))
                return 0;

        intel_init_pm(i915);

        intel_panel_sanitize_ssc(i915);

        intel_pps_setup(i915);

        intel_gmbus_setup(i915);

        drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
                    INTEL_NUM_PIPES(i915),
                    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

        /* One crtc per pipe; a failure here aborts the whole init. */
        for_each_pipe(i915, pipe) {
                ret = intel_crtc_init(i915, pipe);
                if (ret) {
                        intel_mode_config_cleanup(i915);
                        return ret;
                }
        }

        intel_plane_possible_crtcs_init(i915);
        intel_shared_dpll_init(dev);
        intel_fdi_pll_freq_update(i915);

        intel_update_czclk(i915);
        intel_modeset_init_hw(i915);
        intel_dpll_update_ref_clks(i915);

        intel_hdcp_component_init(i915);

        if (i915->max_cdclk_freq == 0)
                intel_update_max_cdclk(i915);

        /*
         * If the platform has HTI, we need to find out whether it has reserved
         * any display resources before we create our display outputs.
         */
        if (INTEL_INFO(i915)->display.has_hti)
                i915->hti_state = intel_de_read(i915, HDPORT_STATE);

        /* Just disable it once at startup */
        intel_vga_disable(i915);
        intel_setup_outputs(i915);

        /* Take over the state the BIOS/GOP left the hardware in. */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        intel_acpi_assign_connector_fwnodes(i915);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
                        continue;
                intel_crtc_initial_plane_config(crtc);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH(i915))
                sanitize_watermarks(i915);

        return 0;
}
9878
/*
 * Display init part #3: call after gem init.
 *
 * Performs the initial atomic commit, sets up the overlay and fbdev,
 * and only then enables hotplug handling. Returns 0 on success or a
 * negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
        int ret;

        if (!HAS_DISPLAY(i915))
                return 0;

        /*
         * Force all active planes to recompute their states. So that on
         * mode_setcrtc after probe, all the intel_plane_state variables
         * are already calculated and there is no assert_plane warnings
         * during bootup.
         */
        ret = intel_initial_commit(&i915->drm);
        if (ret)
                drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

        intel_overlay_setup(i915);

        ret = intel_fbdev_init(&i915->drm);
        if (ret)
                return ret;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(i915);
        intel_hpd_poll_disable(i915);

        intel_init_ipc(i915);

        return 0;
}
9911
/*
 * Force-enable @pipe with a fixed 640x480@60Hz timing for the i830
 * "force quirk". Programs the DPLL and pipe timing registers directly,
 * bypassing the atomic machinery, then waits for the pipe scanline to
 * start moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Sanity check that the fixed dividers produce the expected dotclock. */
        drm_WARN_ON(&dev_priv->drm,
                    i9xx_calc_dpll_params(48000, &clock) != 25154);

        drm_dbg_kms(&dev_priv->drm,
                    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                    pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        intel_de_write(dev_priv, FP0(pipe), fp);
        intel_de_write(dev_priv, FP1(pipe), fp);

        /* Hardcoded 640x480@60 CEA-style timings. */
        intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        intel_de_write(dev_priv, DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        intel_de_posting_read(dev_priv, DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        intel_de_write(dev_priv, DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                intel_de_write(dev_priv, DPLL(pipe), dpll);
                intel_de_posting_read(dev_priv, DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
        intel_de_posting_read(dev_priv, PIPECONF(pipe));

        intel_wait_for_pipe_scanline_moving(crtc);
}
9983
/*
 * Counterpart to i830_enable_pipe(): turn the force-quirk pipe back
 * off. All planes and cursors are expected to be disabled already
 * (WARNed below); the pipe is then disabled, the scanline is waited
 * on, and finally the DPLL is shut down.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

        drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
                    pipe_name(pipe));

        /* Nothing should still be scanning out of this pipe. */
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

        intel_de_write(dev_priv, PIPECONF(pipe), 0);
        intel_de_posting_read(dev_priv, PIPECONF(pipe));

        intel_wait_for_pipe_scanline_stopped(crtc);

        /* Disable the DPLL only after the pipe has fully stopped. */
        intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
        intel_de_posting_read(dev_priv, DPLL(pipe));
}
10010
10011 static void
10012 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
10013 {
10014         struct intel_crtc *crtc;
10015
10016         if (DISPLAY_VER(dev_priv) >= 4)
10017                 return;
10018
10019         for_each_intel_crtc(&dev_priv->drm, crtc) {
10020                 struct intel_plane *plane =
10021                         to_intel_plane(crtc->base.primary);
10022                 struct intel_crtc *plane_crtc;
10023                 enum pipe pipe;
10024
10025                 if (!plane->get_hw_state(plane, &pipe))
10026                         continue;
10027
10028                 if (pipe == crtc->pipe)
10029                         continue;
10030
10031                 drm_dbg_kms(&dev_priv->drm,
10032                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
10033                             plane->base.base.id, plane->base.name);
10034
10035                 plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
10036                 intel_plane_disable_noatomic(plane_crtc, plane);
10037         }
10038 }
10039
/*
 * Return true if at least one encoder is currently attached to @crtc.
 * The loop body returns on the first iteration, so this is a pure
 * existence check.
 */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;

        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                return true;

        return false;
}
10050
/*
 * Return the first connector currently routed to @encoder, or NULL if
 * no connector is using it.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct intel_connector *connector;

        for_each_connector_on_encoder(dev, &encoder->base, connector)
                return connector;

        return NULL;
}
10061
10062 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
10063                               enum pipe pch_transcoder)
10064 {
10065         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
10066                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
10067 }
10068
/*
 * Program the configured frame start delay (dev_priv->framestart_delay,
 * range 1-4) into the platform-appropriate register, overwriting any
 * value the BIOS may have left behind. On HSW/BDW/gen9+ this lives in
 * CHICKEN_TRANS (except for DSI transcoders, which are skipped), on
 * older platforms in PIPECONF. For PCH encoders the matching PCH
 * transcoder register is also updated.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        if (DISPLAY_VER(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
                u32 val;

                /* DSI transcoders have no CHICKEN_TRANS register. */
                if (transcoder_is_dsi(cpu_transcoder))
                        return;

                val = intel_de_read(dev_priv, reg);
                val &= ~HSW_FRAME_START_DELAY_MASK;
                val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
                intel_de_write(dev_priv, reg, val);
        } else {
                i915_reg_t reg = PIPECONF(cpu_transcoder);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~PIPECONF_FRAME_START_DELAY_MASK;
                val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
                intel_de_write(dev_priv, reg, val);
        }

        if (!crtc_state->has_pch_encoder)
                return;

        /* Mirror the delay into the PCH transcoder register as well. */
        if (HAS_PCH_IBX(dev_priv)) {
                i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~TRANS_FRAME_START_DELAY_MASK;
                val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
                intel_de_write(dev_priv, reg, val);
        } else {
                enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
                i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
                val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
                intel_de_write(dev_priv, reg, val);
        }
}
10119
/*
 * Sanitize the state of a single crtc after hardware readout: clear
 * BIOS leftovers (frame start delay, non-primary planes, background
 * color), disable the crtc if it's active without encoders, and set
 * up the FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

        if (crtc_state->hw.active) {
                struct intel_plane *plane;

                /* Clear any frame start delays used for debugging left by the BIOS */
                intel_sanitize_frame_start_delay(crtc_state);

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        if (plane_state->uapi.visible &&
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }

                /* Disable any background color/etc. set by the BIOS */
                intel_color_commit(crtc_state);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. Bigjoiner slaves follow their
         * master and are left alone here. */
        if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
            !crtc_state->bigjoiner_slave)
                intel_crtc_disable_noatomic(crtc, ctx);

        if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we don't have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH transcoder underrun reporting state
                 * within the crtc. With crtc for pipe A housing the underrun
                 * reporting state for PCH transcoder A, crtc for pipe B housing
                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
                 * and marking underrun reporting as disabled for the non-existing
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
                if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
10181
/*
 * Detect the known-bad BIOS DPLL programming described below, so the
 * caller can disable the pipe instead of trying to use the bogus state.
 */
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

        /*
         * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
         * the hardware when a high res displays plugged in. DPLL P
         * divider is zero, and the pipe timings are bonkers. We'll
         * try to disable everything in that case.
         *
         * FIXME would be nice to be able to sanitize this state
         * without several WARNs, but for now let's take the easy
         * road.
         */
        return IS_SANDYBRIDGE(dev_priv) &&
                crtc_state->hw.active &&
                crtc_state->shared_dpll &&
                crtc_state->port_clock == 0;
}
10201
/*
 * Sanitize a single encoder after hardware state readout: manually
 * disable encoders that have active connectors but no active pipe
 * (or a bogus DPLL config), then notify opregion and, on DDI
 * platforms, fix up the encoder's PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_crtc_state *crtc_state = crtc ?
                to_intel_crtc_state(crtc->base.state) : NULL;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = crtc_state &&
                crtc_state->hw.active;

        /* Treat a misprogrammed-DPLL pipe as inactive so it gets disabled. */
        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
                drm_dbg_kms(&dev_priv->drm,
                            "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
                            pipe_name(crtc->pipe));
                has_active_crtc = false;
        }

        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                            encoder->base.base.id,
                            encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (crtc_state) {
                        struct drm_encoder *best_encoder;

                        drm_dbg_kms(&dev_priv->drm,
                                    "[ENCODER:%d:%s] manually disabled\n",
                                    encoder->base.base.id,
                                    encoder->base.name);

                        /* avoid oopsing in case the hooks consult best_encoder */
                        best_encoder = connector->base.state->best_encoder;
                        connector->base.state->best_encoder = &encoder->base;

                        /* FIXME NULL atomic state passed! */
                        if (encoder->disable)
                                encoder->disable(NULL, encoder, crtc_state,
                                                 connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(NULL, encoder, crtc_state,
                                                      connector->base.state);

                        /* Restore the original best_encoder. */
                        connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */

                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }

        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

        if (HAS_DDI(dev_priv))
                intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
10272
/* FIXME read out full plane state for all planes */
/*
 * Read out the minimal per-plane hardware state (enabled + owning
 * pipe), update each plane's software visibility accordingly, then
 * rebuild the per-crtc plane bitmasks.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct intel_crtc_state *crtc_state;
                enum pipe pipe = PIPE_A; /* fallback if the hook doesn't set it */
                bool visible;

                visible = plane->get_hw_state(plane, &pipe);

                /* Attribute the plane to the crtc the hardware says owns it. */
                crtc = intel_crtc_for_pipe(dev_priv, pipe);
                crtc_state = to_intel_crtc_state(crtc->base.state);

                intel_set_plane_visible(crtc_state, plane_state, visible);

                drm_dbg_kms(&dev_priv->drm,
                            "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
                            plane->base.base.id, plane->base.name,
                            enableddisabled(visible), pipe_name(pipe));
        }

        /* With all planes attributed, recompute each crtc's plane masks. */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                fixup_plane_bitmasks(crtc_state);
        }
}
10306
10307 static void intel_modeset_readout_hw_state(struct drm_device *dev)
10308 {
10309         struct drm_i915_private *dev_priv = to_i915(dev);
10310         struct intel_cdclk_state *cdclk_state =
10311                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
10312         struct intel_dbuf_state *dbuf_state =
10313                 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
10314         enum pipe pipe;
10315         struct intel_crtc *crtc;
10316         struct intel_encoder *encoder;
10317         struct intel_connector *connector;
10318         struct drm_connector_list_iter conn_iter;
10319         u8 active_pipes = 0;
10320
10321         for_each_intel_crtc(dev, crtc) {
10322                 struct intel_crtc_state *crtc_state =
10323                         to_intel_crtc_state(crtc->base.state);
10324
10325                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
10326                 intel_crtc_free_hw_state(crtc_state);
10327                 intel_crtc_state_reset(crtc_state, crtc);
10328
10329                 intel_crtc_get_pipe_config(crtc_state);
10330
10331                 crtc_state->hw.enable = crtc_state->hw.active;
10332
10333                 crtc->base.enabled = crtc_state->hw.enable;
10334                 crtc->active = crtc_state->hw.active;
10335
10336                 if (crtc_state->hw.active)
10337                         active_pipes |= BIT(crtc->pipe);
10338
10339                 drm_dbg_kms(&dev_priv->drm,
10340                             "[CRTC:%d:%s] hw state readout: %s\n",
10341                             crtc->base.base.id, crtc->base.name,
10342                             enableddisabled(crtc_state->hw.active));
10343         }
10344
10345         cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
10346
10347         readout_plane_state(dev_priv);
10348
10349         for_each_intel_encoder(dev, encoder) {
10350                 struct intel_crtc_state *crtc_state = NULL;
10351
10352                 pipe = 0;
10353
10354                 if (encoder->get_hw_state(encoder, &pipe)) {
10355                         crtc = intel_crtc_for_pipe(dev_priv, pipe);
10356                         crtc_state = to_intel_crtc_state(crtc->base.state);
10357
10358                         encoder->base.crtc = &crtc->base;
10359                         intel_encoder_get_config(encoder, crtc_state);
10360
10361                         /* read out to slave crtc as well for bigjoiner */
10362                         if (crtc_state->bigjoiner) {
10363                                 /* encoder should read be linked to bigjoiner master */
10364                                 WARN_ON(crtc_state->bigjoiner_slave);
10365
10366                                 crtc = crtc_state->bigjoiner_linked_crtc;
10367                                 crtc_state = to_intel_crtc_state(crtc->base.state);
10368                                 intel_encoder_get_config(encoder, crtc_state);
10369                         }
10370                 } else {
10371                         encoder->base.crtc = NULL;
10372                 }
10373
10374                 if (encoder->sync_state)
10375                         encoder->sync_state(encoder, crtc_state);
10376
10377                 drm_dbg_kms(&dev_priv->drm,
10378                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10379                             encoder->base.base.id, encoder->base.name,
10380                             enableddisabled(encoder->base.crtc),
10381                             pipe_name(pipe));
10382         }
10383
10384         intel_dpll_readout_hw_state(dev_priv);
10385
10386         drm_connector_list_iter_begin(dev, &conn_iter);
10387         for_each_intel_connector_iter(connector, &conn_iter) {
10388                 if (connector->get_hw_state(connector)) {
10389                         struct intel_crtc_state *crtc_state;
10390                         struct intel_crtc *crtc;
10391
10392                         connector->base.dpms = DRM_MODE_DPMS_ON;
10393
10394                         encoder = intel_attached_encoder(connector);
10395                         connector->base.encoder = &encoder->base;
10396
10397                         crtc = to_intel_crtc(encoder->base.crtc);
10398                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
10399
10400                         if (crtc_state && crtc_state->hw.active) {
10401                                 /*
10402                                  * This has to be done during hardware readout
10403                                  * because anything calling .crtc_disable may
10404                                  * rely on the connector_mask being accurate.
10405                                  */
10406                                 crtc_state->uapi.connector_mask |=
10407                                         drm_connector_mask(&connector->base);
10408                                 crtc_state->uapi.encoder_mask |=
10409                                         drm_encoder_mask(&encoder->base);
10410                         }
10411                 } else {
10412                         connector->base.dpms = DRM_MODE_DPMS_OFF;
10413                         connector->base.encoder = NULL;
10414                 }
10415                 drm_dbg_kms(&dev_priv->drm,
10416                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
10417                             connector->base.base.id, connector->base.name,
10418                             enableddisabled(connector->base.encoder));
10419         }
10420         drm_connector_list_iter_end(&conn_iter);
10421
10422         for_each_intel_crtc(dev, crtc) {
10423                 struct intel_bw_state *bw_state =
10424                         to_intel_bw_state(dev_priv->bw_obj.state);
10425                 struct intel_crtc_state *crtc_state =
10426                         to_intel_crtc_state(crtc->base.state);
10427                 struct intel_plane *plane;
10428                 int min_cdclk = 0;
10429
10430                 if (crtc_state->hw.active) {
10431                         /*
10432                          * The initial mode needs to be set in order to keep
10433                          * the atomic core happy. It wants a valid mode if the
10434                          * crtc's enabled, so we do the above call.
10435                          *
10436                          * But we don't set all the derived state fully, hence
10437                          * set a flag to indicate that a full recalculation is
10438                          * needed on the next commit.
10439                          */
10440                         crtc_state->inherited = true;
10441
10442                         intel_crtc_update_active_timings(crtc_state);
10443
10444                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
10445                 }
10446
10447                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
10448                         const struct intel_plane_state *plane_state =
10449                                 to_intel_plane_state(plane->base.state);
10450
10451                         /*
10452                          * FIXME don't have the fb yet, so can't
10453                          * use intel_plane_data_rate() :(
10454                          */
10455                         if (plane_state->uapi.visible)
10456                                 crtc_state->data_rate[plane->id] =
10457                                         4 * crtc_state->pixel_rate;
10458                         /*
10459                          * FIXME don't have the fb yet, so can't
10460                          * use plane->min_cdclk() :(
10461                          */
10462                         if (plane_state->uapi.visible && plane->min_cdclk) {
10463                                 if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
10464                                         crtc_state->min_cdclk[plane->id] =
10465                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
10466                                 else
10467                                         crtc_state->min_cdclk[plane->id] =
10468                                                 crtc_state->pixel_rate;
10469                         }
10470                         drm_dbg_kms(&dev_priv->drm,
10471                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
10472                                     plane->base.base.id, plane->base.name,
10473                                     crtc_state->min_cdclk[plane->id]);
10474                 }
10475
10476                 if (crtc_state->hw.active) {
10477                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
10478                         if (drm_WARN_ON(dev, min_cdclk < 0))
10479                                 min_cdclk = 0;
10480                 }
10481
10482                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
10483                 cdclk_state->min_voltage_level[crtc->pipe] =
10484                         crtc_state->min_voltage_level;
10485
10486                 intel_bw_crtc_update(bw_state, crtc_state);
10487
10488                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
10489         }
10490 }
10491
10492 static void
10493 get_encoder_power_domains(struct drm_i915_private *dev_priv)
10494 {
10495         struct intel_encoder *encoder;
10496
10497         for_each_intel_encoder(&dev_priv->drm, encoder) {
10498                 struct intel_crtc_state *crtc_state;
10499
10500                 if (!encoder->get_power_domains)
10501                         continue;
10502
10503                 /*
10504                  * MST-primary and inactive encoders don't have a crtc state
10505                  * and neither of these require any power domain references.
10506                  */
10507                 if (!encoder->base.crtc)
10508                         continue;
10509
10510                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10511                 encoder->get_power_domains(encoder, crtc_state);
10512         }
10513 }
10514
10515 static void intel_early_display_was(struct drm_i915_private *dev_priv)
10516 {
10517         /*
10518          * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
10519          * Also known as Wa_14010480278.
10520          */
10521         if (IS_DISPLAY_VER(dev_priv, 10, 12))
10522                 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
10523                                intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
10524
10525         if (IS_HASWELL(dev_priv)) {
10526                 /*
10527                  * WaRsPkgCStateDisplayPMReq:hsw
10528                  * System hang if this isn't done before disabling all planes!
10529                  */
10530                 intel_de_write(dev_priv, CHICKEN_PAR1_1,
10531                                intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
10532         }
10533
10534         if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
10535                 /* Display WA #1142:kbl,cfl,cml */
10536                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
10537                              KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
10538                 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
10539                              KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
10540                              KBL_ARB_FILL_SPARE_14);
10541         }
10542 }
10543
10544 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
10545                                        enum port port, i915_reg_t hdmi_reg)
10546 {
10547         u32 val = intel_de_read(dev_priv, hdmi_reg);
10548
10549         if (val & SDVO_ENABLE ||
10550             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
10551                 return;
10552
10553         drm_dbg_kms(&dev_priv->drm,
10554                     "Sanitizing transcoder select for HDMI %c\n",
10555                     port_name(port));
10556
10557         val &= ~SDVO_PIPE_SEL_MASK;
10558         val |= SDVO_PIPE_SEL(PIPE_A);
10559
10560         intel_de_write(dev_priv, hdmi_reg, val);
10561 }
10562
10563 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
10564                                      enum port port, i915_reg_t dp_reg)
10565 {
10566         u32 val = intel_de_read(dev_priv, dp_reg);
10567
10568         if (val & DP_PORT_EN ||
10569             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
10570                 return;
10571
10572         drm_dbg_kms(&dev_priv->drm,
10573                     "Sanitizing transcoder select for DP %c\n",
10574                     port_name(port));
10575
10576         val &= ~DP_PIPE_SEL_MASK;
10577         val |= DP_PIPE_SEL(PIPE_A);
10578
10579         intel_de_write(dev_priv, dp_reg, val);
10580 }
10581
10582 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
10583 {
10584         /*
10585          * The BIOS may select transcoder B on some of the PCH
10586          * ports even it doesn't enable the port. This would trip
10587          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
10588          * Sanitize the transcoder select bits to prevent that. We
10589          * assume that the BIOS never actually enabled the port,
10590          * because if it did we'd actually have to toggle the port
10591          * on and back off to make the transcoder A select stick
10592          * (see. intel_dp_link_down(), intel_disable_hdmi(),
10593          * intel_disable_sdvo()).
10594          */
10595         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
10596         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
10597         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
10598
10599         /* PCH SDVOB multiplex with HDMIB */
10600         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
10601         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
10602         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
10603 }
10604
/*
 * Scan out the current hw modeset state, and sanitize it to the current
 * state: read the hardware state into the atomic state structures, then
 * fix up anything inconsistent (encoder/crtc links, plane mappings,
 * PLLs, watermarks) so the first atomic commit starts from a coherent
 * view of the hardware.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
                             struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc *crtc;
        intel_wakeref_t wakeref;

        /* Keep the display powered for the whole readout + sanitize pass. */
        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        /* Apply early workarounds before touching any display state. */
        intel_early_display_was(dev_priv);
        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);

        if (HAS_PCH_IBX(dev_priv))
                ibx_sanitize_pch_ports(dev_priv);

        /*
         * intel_sanitize_plane_mapping() may need to do vblank
         * waits, so we need vblank interrupts restored beforehand.
         */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                drm_crtc_vblank_reset(&crtc->base);

                if (crtc_state->hw.active)
                        intel_crtc_vblank_on(crtc_state);
        }

        intel_sanitize_plane_mapping(dev_priv);

        for_each_intel_encoder(dev, encoder)
                intel_sanitize_encoder(encoder);

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                intel_sanitize_crtc(crtc, ctx);
                intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        intel_dpll_sanitize_state(dev_priv);

        /* Read out (and, where supported, sanitize) watermark state. */
        if (IS_G4X(dev_priv)) {
                g4x_wm_get_hw_state(dev_priv);
                g4x_wm_sanitize(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_wm_get_hw_state(dev_priv);
                vlv_wm_sanitize(dev_priv);
        } else if (DISPLAY_VER(dev_priv) >= 9) {
                skl_wm_get_hw_state(dev_priv);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_wm_get_hw_state(dev_priv);
        }

        /*
         * Sanity check: after sanitization no crtc should be left holding
         * power domain references beyond what its state requires; warn
         * and drop any that are.
         */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                u64 put_domains;

                put_domains = modeset_get_crtc_power_domains(crtc_state);
                if (drm_WARN_ON(dev, put_domains))
                        modeset_put_crtc_power_domains(crtc, put_domains);
        }

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
10683
10684 void intel_display_resume(struct drm_device *dev)
10685 {
10686         struct drm_i915_private *dev_priv = to_i915(dev);
10687         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
10688         struct drm_modeset_acquire_ctx ctx;
10689         int ret;
10690
10691         if (!HAS_DISPLAY(dev_priv))
10692                 return;
10693
10694         dev_priv->modeset_restore_state = NULL;
10695         if (state)
10696                 state->acquire_ctx = &ctx;
10697
10698         drm_modeset_acquire_init(&ctx, 0);
10699
10700         while (1) {
10701                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
10702                 if (ret != -EDEADLK)
10703                         break;
10704
10705                 drm_modeset_backoff(&ctx);
10706         }
10707
10708         if (!ret)
10709                 ret = __intel_display_resume(dev, state, &ctx);
10710
10711         intel_enable_ipc(dev_priv);
10712         drm_modeset_drop_locks(&ctx);
10713         drm_modeset_acquire_fini(&ctx);
10714
10715         if (ret)
10716                 drm_err(&dev_priv->drm,
10717                         "Restoring old state failed with %i\n", ret);
10718         if (state)
10719                 drm_atomic_state_put(state);
10720 }
10721
10722 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
10723 {
10724         struct intel_connector *connector;
10725         struct drm_connector_list_iter conn_iter;
10726
10727         /* Kill all the work that may have been queued by hpd. */
10728         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
10729         for_each_intel_connector_iter(connector, &conn_iter) {
10730                 if (connector->modeset_retry_work.func)
10731                         cancel_work_sync(&connector->modeset_retry_work);
10732                 if (connector->hdcp.shim) {
10733                         cancel_delayed_work_sync(&connector->hdcp.check_work);
10734                         cancel_work_sync(&connector->hdcp.prop_work);
10735                 }
10736         }
10737         drm_connector_list_iter_end(&conn_iter);
10738 }
10739
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
        if (!HAS_DISPLAY(i915))
                return;

        /* Drain any in-flight page flip / modeset commit work. */
        flush_workqueue(i915->flip_wq);
        flush_workqueue(i915->modeset_wq);

        /*
         * Flush the deferred-free worker and check that nothing was left
         * pending on its list afterwards.
         */
        flush_work(&i915->atomic_helper.free_work);
        drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
10752
/*
 * part #2: call after irq uninstall
 *
 * The teardown order below matters; see the per-step comments.
 */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
        if (!HAS_DISPLAY(i915))
                return;

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        intel_hpd_poll_fini(i915);

        /*
         * MST topology needs to be suspended so we don't have any calls to
         * fbdev after it's finalized. MST will be destroyed later as part of
         * drm_mode_config_cleanup()
         */
        intel_dp_mst_suspend(i915);

        /* poll work can call into fbdev, hence clean that up afterwards */
        intel_fbdev_fini(i915);

        intel_unregister_dsm_handler();

        intel_fbc_global_disable(i915);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        intel_hdcp_component_fini(i915);

        intel_mode_config_cleanup(i915);

        intel_overlay_cleanup(i915);

        intel_gmbus_teardown(i915);

        /*
         * NOTE(review): assumes no new work can be queued on these after
         * intel_modeset_driver_remove() flushed them — confirm.
         */
        destroy_workqueue(i915->flip_wq);
        destroy_workqueue(i915->modeset_wq);

        intel_fbc_cleanup(i915);
}
10795
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
        intel_dmc_ucode_fini(i915);

        intel_power_domains_driver_remove(i915);

        intel_vga_unregister(i915);

        /* VBT/BIOS related state goes last. */
        intel_bios_driver_remove(i915);
}
10807
10808 bool intel_modeset_probe_defer(struct pci_dev *pdev)
10809 {
10810         struct drm_privacy_screen *privacy_screen;
10811
10812         /*
10813          * apple-gmux is needed on dual GPU MacBook Pro
10814          * to probe the panel if we're the inactive GPU.
10815          */
10816         if (vga_switcheroo_client_probe_defer(pdev))
10817                 return true;
10818
10819         /* If the LCD panel has a privacy-screen, wait for it */
10820         privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
10821         if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
10822                 return true;
10823
10824         drm_privacy_screen_put(privacy_screen);
10825
10826         return false;
10827 }
10828
/*
 * Register display-facing interfaces (debugfs, opregion/ACPI, audio,
 * fbdev, connector polling) once the rest of display init is done.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
        if (!HAS_DISPLAY(i915))
                return;

        intel_display_debugfs_register(i915);

        /* Must be done after probing outputs */
        intel_opregion_register(i915);
        acpi_video_register();

        intel_audio_init(i915);

        /*
         * Some ports require correctly set-up hpd registers for
         * detection to work properly (leading to ghost connected
         * connector status), e.g. VGA on gm45.  Hence we can only set
         * up the initial fbdev config after hpd irqs are fully
         * enabled. We do it last so that the async config cannot run
         * before the connectors are registered.
         */
        intel_fbdev_initial_config_async(&i915->drm);

        /*
         * We need to coordinate the hotplugs with the asynchronous
         * fbdev configuration, for which we use the
         * fbdev->async_cookie.
         */
        drm_kms_helper_poll_init(&i915->drm);
}
10859
/* Undo intel_display_driver_register(), in roughly reverse order. */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
        if (!HAS_DISPLAY(i915))
                return;

        intel_fbdev_unregister(i915);
        intel_audio_deinit(i915);

        /*
         * After flushing the fbdev (incl. a late async config which
         * will have delayed queuing of a hotplug event), then flush
         * the hotplug events.
         */
        drm_kms_helper_poll_fini(&i915->drm);
        /* Tear down any remaining atomic state before cleanup proper. */
        drm_atomic_helper_shutdown(&i915->drm);

        acpi_video_unregister();
        intel_opregion_unregister(i915);
}