drm/i915: Skip dsc readout if the transcoder is disabled
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 #include <linux/vga_switcheroo.h>
36
37 #include <drm/drm_atomic.h>
38 #include <drm/drm_atomic_helper.h>
39 #include <drm/drm_atomic_uapi.h>
40 #include <drm/drm_damage_helper.h>
41 #include <drm/drm_dp_helper.h>
42 #include <drm/drm_edid.h>
43 #include <drm/drm_fourcc.h>
44 #include <drm/drm_plane_helper.h>
45 #include <drm/drm_privacy_screen_consumer.h>
46 #include <drm/drm_probe_helper.h>
47 #include <drm/drm_rect.h>
48
49 #include "display/intel_audio.h"
50 #include "display/intel_crt.h"
51 #include "display/intel_ddi.h"
52 #include "display/intel_display_debugfs.h"
53 #include "display/intel_dp.h"
54 #include "display/intel_dp_mst.h"
55 #include "display/intel_dpll.h"
56 #include "display/intel_dpll_mgr.h"
57 #include "display/intel_drrs.h"
58 #include "display/intel_dsi.h"
59 #include "display/intel_dvo.h"
60 #include "display/intel_fb.h"
61 #include "display/intel_gmbus.h"
62 #include "display/intel_hdmi.h"
63 #include "display/intel_lvds.h"
64 #include "display/intel_sdvo.h"
65 #include "display/intel_snps_phy.h"
66 #include "display/intel_tv.h"
67 #include "display/intel_vdsc.h"
68 #include "display/intel_vrr.h"
69
70 #include "gem/i915_gem_lmem.h"
71 #include "gem/i915_gem_object.h"
72
73 #include "gt/gen8_ppgtt.h"
74
75 #include "g4x_dp.h"
76 #include "g4x_hdmi.h"
77 #include "i915_drv.h"
78 #include "icl_dsi.h"
79 #include "intel_acpi.h"
80 #include "intel_atomic.h"
81 #include "intel_atomic_plane.h"
82 #include "intel_bw.h"
83 #include "intel_cdclk.h"
84 #include "intel_color.h"
85 #include "intel_crtc.h"
86 #include "intel_de.h"
87 #include "intel_display_types.h"
88 #include "intel_dmc.h"
89 #include "intel_dp_link_training.h"
90 #include "intel_dpt.h"
91 #include "intel_fbc.h"
92 #include "intel_fbdev.h"
93 #include "intel_fdi.h"
94 #include "intel_fifo_underrun.h"
95 #include "intel_frontbuffer.h"
96 #include "intel_hdcp.h"
97 #include "intel_hotplug.h"
98 #include "intel_overlay.h"
99 #include "intel_panel.h"
100 #include "intel_pch_display.h"
101 #include "intel_pch_refclk.h"
102 #include "intel_pcode.h"
103 #include "intel_pipe_crc.h"
104 #include "intel_plane_initial.h"
105 #include "intel_pm.h"
106 #include "intel_pps.h"
107 #include "intel_psr.h"
108 #include "intel_quirks.h"
109 #include "intel_sprite.h"
110 #include "intel_tc.h"
111 #include "intel_vga.h"
112 #include "i9xx_plane.h"
113 #include "skl_scaler.h"
114 #include "skl_universal_plane.h"
115 #include "vlv_dsi_pll.h"
116 #include "vlv_sideband.h"
117 #include "vlv_dsi.h"
118
119 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
120 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
121 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
122                                          const struct intel_link_m_n *m_n,
123                                          const struct intel_link_m_n *m2_n2);
124 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
125 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
126 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
127 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
128 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
129 static void intel_modeset_setup_hw_state(struct drm_device *dev,
130                                          struct drm_modeset_acquire_ctx *ctx);
131
132 /**
133  * intel_update_watermarks - update FIFO watermark values based on current modes
134  * @dev_priv: i915 device
135  *
136  * Calculate watermark values for the various WM regs based on current mode
137  * and plane configuration.
138  *
139  * There are several cases to deal with here:
140  *   - normal (i.e. non-self-refresh)
141  *   - self-refresh (SR) mode
142  *   - lines are large relative to FIFO size (buffer can hold up to 2)
143  *   - lines are small relative to FIFO size (buffer can hold more than 2
144  *     lines), so need to account for TLB latency
145  *
146  *   The normal calculation is:
147  *     watermark = dotclock * bytes per pixel * latency
148  *   where latency is platform & configuration dependent (we assume pessimal
149  *   values here).
150  *
151  *   The SR calculation is:
152  *     watermark = (trunc(latency/line time)+1) * surface width *
153  *       bytes per pixel
154  *   where
155  *     line time = htotal / dotclock
156  *     surface width = hdisplay for normal plane and 64 for cursor
157  *   and latency is assumed to be high, as above.
158  *
159  * The final value programmed to the register should always be rounded up,
160  * and include an extra 2 entries to account for clock crossings.
161  *
162  * We don't use the sprite, so we can ignore that.  And on Crestline we have
163  * to set the non-SR watermarks to 8.
164  */
165 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
166 {
167         if (dev_priv->wm_disp->update_wm)
168                 dev_priv->wm_disp->update_wm(dev_priv);
169 }
170
171 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
172                                  struct intel_crtc *crtc)
173 {
174         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
175         if (dev_priv->wm_disp->compute_pipe_wm)
176                 return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
177         return 0;
178 }
179
180 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
181                                          struct intel_crtc *crtc)
182 {
183         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
184         if (!dev_priv->wm_disp->compute_intermediate_wm)
185                 return 0;
186         if (drm_WARN_ON(&dev_priv->drm,
187                         !dev_priv->wm_disp->compute_pipe_wm))
188                 return 0;
189         return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
190 }
191
192 static bool intel_initial_watermarks(struct intel_atomic_state *state,
193                                      struct intel_crtc *crtc)
194 {
195         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
196         if (dev_priv->wm_disp->initial_watermarks) {
197                 dev_priv->wm_disp->initial_watermarks(state, crtc);
198                 return true;
199         }
200         return false;
201 }
202
203 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
204                                            struct intel_crtc *crtc)
205 {
206         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
207         if (dev_priv->wm_disp->atomic_update_watermarks)
208                 dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
209 }
210
211 static void intel_optimize_watermarks(struct intel_atomic_state *state,
212                                       struct intel_crtc *crtc)
213 {
214         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
215         if (dev_priv->wm_disp->optimize_watermarks)
216                 dev_priv->wm_disp->optimize_watermarks(state, crtc);
217 }
218
219 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
220 {
221         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
222         if (dev_priv->wm_disp->compute_global_watermarks)
223                 return dev_priv->wm_disp->compute_global_watermarks(state);
224         return 0;
225 }
226
227 /* returns HPLL frequency in kHz */
228 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
229 {
230         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
231
232         /* Obtain SKU information */
233         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
234                 CCK_FUSE_HPLL_FREQ_MASK;
235
236         return vco_freq[hpll_freq] * 1000;
237 }
238
239 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
240                       const char *name, u32 reg, int ref_freq)
241 {
242         u32 val;
243         int divider;
244
245         val = vlv_cck_read(dev_priv, reg);
246         divider = val & CCK_FREQUENCY_VALUES;
247
248         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
249                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
250                  "%s change in progress\n", name);
251
252         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
253 }
254
255 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
256                            const char *name, u32 reg)
257 {
258         int hpll;
259
260         vlv_cck_get(dev_priv);
261
262         if (dev_priv->hpll_freq == 0)
263                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
264
265         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
266
267         vlv_cck_put(dev_priv);
268
269         return hpll;
270 }
271
272 static void intel_update_czclk(struct drm_i915_private *dev_priv)
273 {
274         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
275                 return;
276
277         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
278                                                       CCK_CZ_CLOCK_CONTROL);
279
280         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
281                 dev_priv->czclk_freq);
282 }
283
284 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
285 {
286         return (crtc_state->active_planes &
287                 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
288 }
289
290 /* WA Display #0827: Gen9:all */
291 static void
292 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
293 {
294         if (enable)
295                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
296                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
297         else
298                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
299                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
300 }
301
302 /* Wa_2006604312:icl,ehl */
303 static void
304 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
305                        bool enable)
306 {
307         if (enable)
308                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
309                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
310         else
311                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
312                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
313 }
314
/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	/* Toggle the cursor clock-gating disable bit for the given pipe. */
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}
323
/*
 * A transcoder port-sync slave has a valid master transcoder assigned;
 * a master records its slaves in sync_mode_slaves_mask instead.
 */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	/* A non-empty slave mask means this transcoder drives others. */
	return crtc_state->sync_mode_slaves_mask != 0;
}

/* True when the CRTC participates in port sync, as either master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
342
343 static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
344 {
345         if (crtc_state->bigjoiner_slave)
346                 return crtc_state->bigjoiner_linked_crtc;
347         else
348                 return to_intel_crtc(crtc_state->uapi.crtc);
349 }
350
351 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
352                                     enum pipe pipe)
353 {
354         i915_reg_t reg = PIPEDSL(pipe);
355         u32 line1, line2;
356         u32 line_mask;
357
358         if (DISPLAY_VER(dev_priv) == 2)
359                 line_mask = DSL_LINEMASK_GEN2;
360         else
361                 line_mask = DSL_LINEMASK_GEN3;
362
363         line1 = intel_de_read(dev_priv, reg) & line_mask;
364         msleep(5);
365         line2 = intel_de_read(dev_priv, reg) & line_mask;
366
367         return line1 != line2;
368 }
369
/*
 * Poll (up to 100ms) until the pipe's scanline counter is moving (@state
 * true) or has stopped (@state false), logging an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

/* Wait for the pipe's scanline counter to stop advancing (pipe off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

/* Wait for the pipe's scanline counter to start advancing (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
391
/*
 * Wait for the pipe to actually shut down after it has been disabled.
 * Gen4+ exposes a pipe-active status bit in PIPECONF; older platforms
 * are instead deemed idle once the scanline counter stops moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
411
/*
 * Warn if the given CPU transcoder's actual enable state does not match
 * the expected @state.
 */
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only read PIPECONF if the transcoder's power well is already up;
	 * a powered-down transcoder is simply reported as disabled rather
	 * than touching dead hardware.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
439
/* Warn if the plane's actual hardware state does not match @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
454
/* Assert that every plane attached to the CRTC is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
463
/*
 * Wait for a VLV/CHV digital port to report ready, warning on timeout.
 * The ready bits live in DPLL(0) for ports B and C (port C's field is
 * shifted up by 4) and in DPIO_PHY_STATUS for port D.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		/* No other ports have DPIO-based ready status on VLV/CHV. */
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
497
498 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
499 {
500         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
501
502         if (HAS_PCH_LPT(dev_priv))
503                 return PIPE_A;
504         else
505                 return crtc->pipe;
506 }
507
/*
 * Enable the CPU transcoder/pipe for a newly-configured CRTC: assert the
 * required PLL/FDI preconditions and that all planes are off, then set
 * PIPECONF_ENABLE and, when no reliable HW frame counter exists, wait for
 * the scanline counter to start moving.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
568
/*
 * Disable the CPU transcoder/pipe for a CRTC being shut down. Planes
 * must already be off. On I830 the pipe itself is kept enabled; only
 * ancillary bits (double wide) are cleared there.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Nothing to do if the pipe is already off. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	/* Clear the FEC stall workaround bit on gen12+ while disabling. */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait for pipe-off when we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
610
611 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
612 {
613         unsigned int size = 0;
614         int i;
615
616         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
617                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
618
619         return size;
620 }
621
622 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
623 {
624         unsigned int size = 0;
625         int i;
626
627         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
628                 unsigned int plane_size;
629
630                 if (rem_info->plane[i].linear)
631                         plane_size = rem_info->plane[i].size;
632                 else
633                         plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
634
635                 if (plane_size == 0)
636                         continue;
637
638                 if (rem_info->plane_alignment)
639                         size = ALIGN(size, rem_info->plane_alignment);
640
641                 size += plane_size;
642         }
643
644         return size;
645 }
646
/*
 * Whether the plane's GGTT mapping needs a fence register: always on
 * pre-gen4, otherwise only for FBC-capable planes with a normal
 * (non-rotated/remapped) GTT view.
 */
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
656
657 /*
658  * Convert the x/y offsets into a linear offset.
659  * Only valid with 0/180 degree rotation, which is fine since linear
660  * offset is only used with linear buffers on pre-hsw and tiled buffers
661  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
662  */
663 u32 intel_fb_xy_to_linear(int x, int y,
664                           const struct intel_plane_state *state,
665                           int color_plane)
666 {
667         const struct drm_framebuffer *fb = state->hw.fb;
668         unsigned int cpp = fb->format->cpp[color_plane];
669         unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
670
671         return y * pitch + x * cpp;
672 }
673
674 /*
675  * Add the x/y offsets derived from fb->offsets[] to the user
676  * specified plane src x/y offsets. The resulting x/y offsets
677  * specify the start of scanout from the beginning of the gtt mapping.
678  */
679 void intel_add_fb_offsets(int *x, int *y,
680                           const struct intel_plane_state *state,
681                           int color_plane)
682
683 {
684         *x += state->view.color_plane[color_plane].x;
685         *y += state->view.color_plane[color_plane].y;
686 }
687
/*
 * Maximum framebuffer stride for the given format/modifier, taken from
 * the primary plane of the first available CRTC. Returns 0 when the
 * device has no display or no CRTCs.
 */
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}
711
712 static void
713 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
714                         struct intel_plane_state *plane_state,
715                         bool visible)
716 {
717         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
718
719         plane_state->uapi.visible = visible;
720
721         if (visible)
722                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
723         else
724                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
725 }
726
/* Rebuild enabled_planes/active_planes from the uapi plane_mask. */
static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}
746
/*
 * Disable a plane outside the atomic commit machinery (used when
 * sanitizing BIOS/boot state). Updates the software CRTC state to
 * match, then arms the hardware disable and waits a vblank so it
 * actually latches.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Keep the software state consistent with the disabled plane. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS depends on the primary plane being active. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}
792
/*
 * Y offset (in lines) of the plane's scanout start relative to the
 * start of the fence, obtained by adjusting color plane 0 back to
 * aligned offset 0.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
803
/*
 * Restore display HW state and, when a duplicated atomic @state is
 * provided, re-commit it. Returns 0 on success or a negative error;
 * -EDEADLK is not expected from the commit (hence the WARN).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* Nothing was duplicated; HW readout above is all we need. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
842
843 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
844 {
845         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
846                 intel_has_gpu_reset(&dev_priv->gt));
847 }
848
/*
 * intel_display_prepare_reset - quiesce the display before a GPU reset
 * @dev_priv: i915 device
 *
 * Only acts when the pending reset will clobber the display (or
 * force_reset_modeset_test is set). Sets I915_RESET_MODESET, takes all
 * modeset locks, duplicates the current atomic state for
 * intel_display_finish_reset() to restore, and disables all crtcs.
 * On the error paths the locks are intentionally left held; they are
 * dropped by intel_display_finish_reset().
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		/* modeset_restore_state stays NULL; finish_reset only unlocks */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_display_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
911
/*
 * intel_display_finish_reset - restore the display after a GPU reset
 * @dev_priv: i915 device
 *
 * Counterpart to intel_display_prepare_reset(). If the reset clobbered
 * the display, re-initializes display hardware before recommitting the
 * previously duplicated state; otherwise just recommits it. Always
 * drops the modeset locks taken by prepare_reset and clears the
 * I915_RESET_MODESET flag.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* May be NULL if prepare_reset failed to duplicate the state. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
963
/*
 * Program the per-pipe PIPE_CHICKEN workaround bits for ICL+ pipes:
 * alpha-math bypass (WA #1153), pixel rounding passthrough
 * (WA #1605353570), underrun recovery disable on display 13+ (with
 * inverted bit polarity on DG2), and Wa_14010547955 on DG2 B0+.
 */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	/* Read-modify-write so unrelated chicken bits are preserved. */
	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
1002
/*
 * intel_has_pending_fb_unpin - check for commits with pending cleanup
 * @dev_priv: i915 device
 *
 * Scans every crtc's commit list. If the first queued commit on some
 * crtc has not yet completed its cleanup_done phase, waits for one
 * vblank on that crtc and returns true. Returns false once all crtcs
 * are clean.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		/* commit_lock guards this crtc's commit_list. */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		/* An empty commit list counts as "cleaned up". */
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		/* Give the pending commit a vblank to make progress. */
		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}
1027
1028 /*
1029  * Finds the encoder associated with the given CRTC. This can only be
1030  * used when we know that the CRTC isn't feeding multiple encoders!
1031  */
1032 struct intel_encoder *
1033 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1034                            const struct intel_crtc_state *crtc_state)
1035 {
1036         const struct drm_connector_state *connector_state;
1037         const struct drm_connector *connector;
1038         struct intel_encoder *encoder = NULL;
1039         struct intel_crtc *master_crtc;
1040         int num_encoders = 0;
1041         int i;
1042
1043         master_crtc = intel_master_crtc(crtc_state);
1044
1045         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1046                 if (connector_state->crtc != &master_crtc->base)
1047                         continue;
1048
1049                 encoder = to_intel_encoder(connector_state->best_encoder);
1050                 num_encoders++;
1051         }
1052
1053         drm_WARN(encoder->base.dev, num_encoders != 1,
1054                  "%d encoders for pipe %c\n",
1055                  num_encoders, pipe_name(master_crtc->pipe));
1056
1057         return encoder;
1058 }
1059
/*
 * Verify the pipe is actually running after a mode set on CPT: sample
 * the PIPEDSL register and check that its value changes within 5ms;
 * retry once and log an error if the pipe still appears stuck.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
1075
/*
 * ilk_pfit_enable - program and enable the PCH panel fitter
 * @crtc_state: state providing the pch_pfit destination window
 *
 * No-op when the PCH panel fitter is not enabled in @crtc_state.
 * Programs the filter selection and the destination window
 * position/size taken from crtc_state->pch_pfit.dst.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	/* Window position/size registers pack x|y and width|height. */
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
1103
/*
 * hsw_enable_ips - enable IPS for the given crtc state
 * @crtc_state: state of the crtc IPS is being enabled on
 *
 * No-op unless @crtc_state has ips_enabled set. Must run after at
 * least one non-cursor plane is enabled and a vblank has passed
 * (asserted by the WARN below). On BDW IPS is toggled through the
 * pcode mailbox; on other platforms (HSW) directly via IPS_CTL.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
1140
/*
 * hsw_disable_ips - disable IPS for the given crtc state
 * @crtc_state: state of the crtc IPS is being disabled on
 *
 * No-op unless @crtc_state has ips_enabled set. Mirrors
 * hsw_enable_ips(): BDW goes through the pcode mailbox, other
 * platforms write IPS_CTL directly. Ends with a vblank wait, which is
 * required before any plane can be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_crtc_wait_for_next_vblank(crtc);
}
1169
1170 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
1171 {
1172         if (crtc->overlay)
1173                 (void) intel_overlay_switch_off(crtc->overlay);
1174
1175         /* Let userspace switch the overlay on again. In most cases userspace
1176          * has to recompute where to put it anyway.
1177          */
1178 }
1179
1180 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
1181                                        const struct intel_crtc_state *new_crtc_state)
1182 {
1183         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1184         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1185
1186         if (!old_crtc_state->ips_enabled)
1187                 return false;
1188
1189         if (intel_crtc_needs_modeset(new_crtc_state))
1190                 return true;
1191
1192         /*
1193          * Workaround : Do not read or write the pipe palette/gamma data while
1194          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1195          *
1196          * Disable IPS before we program the LUT.
1197          */
1198         if (IS_HASWELL(dev_priv) &&
1199             (new_crtc_state->uapi.color_mgmt_changed ||
1200              new_crtc_state->update_pipe) &&
1201             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1202                 return true;
1203
1204         return !new_crtc_state->ips_enabled;
1205 }
1206
1207 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
1208                                        const struct intel_crtc_state *new_crtc_state)
1209 {
1210         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1211         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1212
1213         if (!new_crtc_state->ips_enabled)
1214                 return false;
1215
1216         if (intel_crtc_needs_modeset(new_crtc_state))
1217                 return true;
1218
1219         /*
1220          * Workaround : Do not read or write the pipe palette/gamma data while
1221          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1222          *
1223          * Re-enable IPS after the LUT has been programmed.
1224          */
1225         if (IS_HASWELL(dev_priv) &&
1226             (new_crtc_state->uapi.color_mgmt_changed ||
1227              new_crtc_state->update_pipe) &&
1228             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1229                 return true;
1230
1231         /*
1232          * We can't read out IPS on broadwell, assume the worst and
1233          * forcibly enable IPS on the first fastset.
1234          */
1235         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
1236                 return true;
1237
1238         return !old_crtc_state->ips_enabled;
1239 }
1240
1241 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1242 {
1243         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1244
1245         if (!crtc_state->nv12_planes)
1246                 return false;
1247
1248         /* WA Display #0827: Gen9:all */
1249         if (DISPLAY_VER(dev_priv) == 9)
1250                 return true;
1251
1252         return false;
1253 }
1254
1255 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1256 {
1257         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1258
1259         /* Wa_2006604312:icl,ehl */
1260         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
1261                 return true;
1262
1263         return false;
1264 }
1265
1266 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1267 {
1268         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1269
1270         /* Wa_1604331009:icl,jsl,ehl */
1271         if (is_hdr_mode(crtc_state) &&
1272             crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1273             DISPLAY_VER(dev_priv) == 11)
1274                 return true;
1275
1276         return false;
1277 }
1278
/*
 * Apply (or undo) the plane stretch-max workaround needed when async
 * flips are used with VT-d active: program stretch max to x1 while
 * async flips are enabled, and restore x8 otherwise. Gen9 and HSW/BDW
 * use different fields of the same chicken register.
 */
static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N strech max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}
1297
1298 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1299 {
1300         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1301
1302         return crtc_state->uapi.async_flip && intel_vtd_active() &&
1303                 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1304 }
1305
1306 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1307                             const struct intel_crtc_state *new_crtc_state)
1308 {
1309         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1310                 new_crtc_state->active_planes;
1311 }
1312
1313 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1314                              const struct intel_crtc_state *new_crtc_state)
1315 {
1316         return old_crtc_state->active_planes &&
1317                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
1318 }
1319
/*
 * intel_post_plane_update - finish up a crtc after its plane update
 * @state: the atomic commit being applied
 * @crtc: the crtc whose planes were just updated
 *
 * Runs after the plane registers have been written: flips frontbuffer
 * tracking bits, updates watermarks, re-enables IPS when warranted,
 * runs FBC/DRRS post-update hooks, and disarms the display workarounds
 * that the new state no longer needs (mirror of intel_pre_plane_update()).
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);
	intel_drrs_page_flip(state, crtc);

	/* Each WA below is disarmed only on a needed -> not-needed edge. */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

}
1358
1359 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1360                                         struct intel_crtc *crtc)
1361 {
1362         const struct intel_crtc_state *crtc_state =
1363                 intel_atomic_get_new_crtc_state(state, crtc);
1364         u8 update_planes = crtc_state->update_planes;
1365         const struct intel_plane_state *plane_state;
1366         struct intel_plane *plane;
1367         int i;
1368
1369         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1370                 if (plane->enable_flip_done &&
1371                     plane->pipe == crtc->pipe &&
1372                     update_planes & BIT(plane->id) &&
1373                     plane_state->do_async_flip)
1374                         plane->enable_flip_done(plane);
1375         }
1376 }
1377
1378 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1379                                          struct intel_crtc *crtc)
1380 {
1381         const struct intel_crtc_state *crtc_state =
1382                 intel_atomic_get_new_crtc_state(state, crtc);
1383         u8 update_planes = crtc_state->update_planes;
1384         const struct intel_plane_state *plane_state;
1385         struct intel_plane *plane;
1386         int i;
1387
1388         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1389                 if (plane->disable_flip_done &&
1390                     plane->pipe == crtc->pipe &&
1391                     update_planes & BIT(plane->id) &&
1392                     plane_state->do_async_flip)
1393                         plane->disable_flip_done(plane);
1394         }
1395 }
1396
/*
 * WA for platforms where the async address update enable bit is double
 * buffered and only latched at the start of vblank: rewrite each
 * affected plane with its old state but with the async flip bit
 * cleared, then wait one vblank so the hardware latches the change.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}
1427
/*
 * intel_pre_plane_update - prepare a crtc before its planes are updated
 * @state: the atomic commit being applied
 * @crtc: the crtc whose planes are about to be updated
 *
 * Runs before any plane registers are written for this commit: PSR
 * pre-update, IPS disable, FBC pre-update, arming of the various
 * display workarounds, cxsr/LP-watermark disables around plane
 * disables, intermediate watermark programming for non-modeset
 * commits, gen2 underrun-reporting disable, and the async flip
 * disable workaround. The ordering of these steps matters.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC may request a vblank wait between its pre-update and the plane update. */
	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	/* Each WA below is armed only on a not-needed -> needed edge. */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
1531
/*
 * intel_crtc_disable_planes - disable all updated planes on a crtc
 * @state: the atomic commit
 * @crtc: the crtc whose planes get disabled
 *
 * Switches off the legacy overlay, disables every plane of this crtc
 * present in the new state's update_planes mask, and finally flushes
 * frontbuffer bits for the planes that were visible before.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		/* Skip planes of other pipes and planes not in this update. */
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
1559
1560 /*
1561  * intel_connector_primary_encoder - get the primary encoder for a connector
1562  * @connector: connector for which to return the encoder
1563  *
1564  * Returns the primary encoder for a connector. There is a 1:1 mapping from
1565  * all connectors to their encoder, except for DP-MST connectors which have
1566  * both a virtual and a primary encoder. These DP-MST primary encoders can be
1567  * pointed to by as many DP-MST connectors as there are pipes.
1568  */
1569 static struct intel_encoder *
1570 intel_connector_primary_encoder(struct intel_connector *connector)
1571 {
1572         struct intel_encoder *encoder;
1573
1574         if (connector->mst_port)
1575                 return &dp_to_dig_port(connector->mst_port)->base;
1576
1577         encoder = intel_attached_encoder(connector);
1578         drm_WARN_ON(connector->base.dev, !encoder);
1579
1580         return encoder;
1581 }
1582
1583 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1584 {
1585         struct drm_i915_private *i915 = to_i915(state->base.dev);
1586         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1587         struct intel_crtc *crtc;
1588         struct drm_connector_state *new_conn_state;
1589         struct drm_connector *connector;
1590         int i;
1591
1592         /*
1593          * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1594          * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1595          */
1596         if (i915->dpll.mgr) {
1597                 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1598                         if (intel_crtc_needs_modeset(new_crtc_state))
1599                                 continue;
1600
1601                         new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1602                         new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1603                 }
1604         }
1605
1606         if (!state->modeset)
1607                 return;
1608
1609         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1610                                         i) {
1611                 struct intel_connector *intel_connector;
1612                 struct intel_encoder *encoder;
1613                 struct intel_crtc *crtc;
1614
1615                 if (!intel_connector_needs_modeset(state, connector))
1616                         continue;
1617
1618                 intel_connector = to_intel_connector(connector);
1619                 encoder = intel_connector_primary_encoder(intel_connector);
1620                 if (!encoder->update_prepare)
1621                         continue;
1622
1623                 crtc = new_conn_state->crtc ?
1624                         to_intel_crtc(new_conn_state->crtc) : NULL;
1625                 encoder->update_prepare(state, encoder, crtc);
1626         }
1627 }
1628
1629 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1630 {
1631         struct drm_connector_state *new_conn_state;
1632         struct drm_connector *connector;
1633         int i;
1634
1635         if (!state->modeset)
1636                 return;
1637
1638         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1639                                         i) {
1640                 struct intel_connector *intel_connector;
1641                 struct intel_encoder *encoder;
1642                 struct intel_crtc *crtc;
1643
1644                 if (!intel_connector_needs_modeset(state, connector))
1645                         continue;
1646
1647                 intel_connector = to_intel_connector(connector);
1648                 encoder = intel_connector_primary_encoder(intel_connector);
1649                 if (!encoder->update_complete)
1650                         continue;
1651
1652                 crtc = new_conn_state->crtc ?
1653                         to_intel_crtc(new_conn_state->crtc) : NULL;
1654                 encoder->update_complete(state, encoder, crtc);
1655         }
1656 }
1657
1658 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1659                                           struct intel_crtc *crtc)
1660 {
1661         const struct intel_crtc_state *crtc_state =
1662                 intel_atomic_get_new_crtc_state(state, crtc);
1663         const struct drm_connector_state *conn_state;
1664         struct drm_connector *conn;
1665         int i;
1666
1667         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1668                 struct intel_encoder *encoder =
1669                         to_intel_encoder(conn_state->best_encoder);
1670
1671                 if (conn_state->crtc != &crtc->base)
1672                         continue;
1673
1674                 if (encoder->pre_pll_enable)
1675                         encoder->pre_pll_enable(state, encoder,
1676                                                 crtc_state, conn_state);
1677         }
1678 }
1679
1680 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1681                                       struct intel_crtc *crtc)
1682 {
1683         const struct intel_crtc_state *crtc_state =
1684                 intel_atomic_get_new_crtc_state(state, crtc);
1685         const struct drm_connector_state *conn_state;
1686         struct drm_connector *conn;
1687         int i;
1688
1689         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1690                 struct intel_encoder *encoder =
1691                         to_intel_encoder(conn_state->best_encoder);
1692
1693                 if (conn_state->crtc != &crtc->base)
1694                         continue;
1695
1696                 if (encoder->pre_enable)
1697                         encoder->pre_enable(state, encoder,
1698                                             crtc_state, conn_state);
1699         }
1700 }
1701
1702 static void intel_encoders_enable(struct intel_atomic_state *state,
1703                                   struct intel_crtc *crtc)
1704 {
1705         const struct intel_crtc_state *crtc_state =
1706                 intel_atomic_get_new_crtc_state(state, crtc);
1707         const struct drm_connector_state *conn_state;
1708         struct drm_connector *conn;
1709         int i;
1710
1711         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1712                 struct intel_encoder *encoder =
1713                         to_intel_encoder(conn_state->best_encoder);
1714
1715                 if (conn_state->crtc != &crtc->base)
1716                         continue;
1717
1718                 if (encoder->enable)
1719                         encoder->enable(state, encoder,
1720                                         crtc_state, conn_state);
1721                 intel_opregion_notify_encoder(encoder, true);
1722         }
1723 }
1724
1725 static void intel_encoders_disable(struct intel_atomic_state *state,
1726                                    struct intel_crtc *crtc)
1727 {
1728         const struct intel_crtc_state *old_crtc_state =
1729                 intel_atomic_get_old_crtc_state(state, crtc);
1730         const struct drm_connector_state *old_conn_state;
1731         struct drm_connector *conn;
1732         int i;
1733
1734         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1735                 struct intel_encoder *encoder =
1736                         to_intel_encoder(old_conn_state->best_encoder);
1737
1738                 if (old_conn_state->crtc != &crtc->base)
1739                         continue;
1740
1741                 intel_opregion_notify_encoder(encoder, false);
1742                 if (encoder->disable)
1743                         encoder->disable(state, encoder,
1744                                          old_crtc_state, old_conn_state);
1745         }
1746 }
1747
1748 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1749                                         struct intel_crtc *crtc)
1750 {
1751         const struct intel_crtc_state *old_crtc_state =
1752                 intel_atomic_get_old_crtc_state(state, crtc);
1753         const struct drm_connector_state *old_conn_state;
1754         struct drm_connector *conn;
1755         int i;
1756
1757         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1758                 struct intel_encoder *encoder =
1759                         to_intel_encoder(old_conn_state->best_encoder);
1760
1761                 if (old_conn_state->crtc != &crtc->base)
1762                         continue;
1763
1764                 if (encoder->post_disable)
1765                         encoder->post_disable(state, encoder,
1766                                               old_crtc_state, old_conn_state);
1767         }
1768 }
1769
1770 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1771                                             struct intel_crtc *crtc)
1772 {
1773         const struct intel_crtc_state *old_crtc_state =
1774                 intel_atomic_get_old_crtc_state(state, crtc);
1775         const struct drm_connector_state *old_conn_state;
1776         struct drm_connector *conn;
1777         int i;
1778
1779         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1780                 struct intel_encoder *encoder =
1781                         to_intel_encoder(old_conn_state->best_encoder);
1782
1783                 if (old_conn_state->crtc != &crtc->base)
1784                         continue;
1785
1786                 if (encoder->post_pll_disable)
1787                         encoder->post_pll_disable(state, encoder,
1788                                                   old_crtc_state, old_conn_state);
1789         }
1790 }
1791
1792 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1793                                        struct intel_crtc *crtc)
1794 {
1795         const struct intel_crtc_state *crtc_state =
1796                 intel_atomic_get_new_crtc_state(state, crtc);
1797         const struct drm_connector_state *conn_state;
1798         struct drm_connector *conn;
1799         int i;
1800
1801         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1802                 struct intel_encoder *encoder =
1803                         to_intel_encoder(conn_state->best_encoder);
1804
1805                 if (conn_state->crtc != &crtc->base)
1806                         continue;
1807
1808                 if (encoder->update_pipe)
1809                         encoder->update_pipe(state, encoder,
1810                                              crtc_state, conn_state);
1811         }
1812 }
1813
1814 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1815 {
1816         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1817         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1818
1819         plane->disable_arm(plane, crtc_state);
1820 }
1821
/*
 * ilk_crtc_enable - pipe enable sequence for ILK/SNB/IVB style hardware
 * @state: the atomic state being committed
 * @crtc: the CRTC to enable
 *
 * NOTE(review): the statement order below follows the hardware enable
 * sequence (timings/pipeconf before encoder pre-enable, transcoder enable
 * before PCH enable, vblank waits last) and must not be reordered casually.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Nothing to do if the pipe is already up. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N for the CPU transcoder when going through the PCH. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		/* No PCH encoder: FDI must be fully off on both ends. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	/* Re-arm underrun reporting now that the pipe is stable. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1906
1907 /* IPS only exists on ULT machines and is tied to pipe A. */
1908 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
1909 {
1910         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
1911 }
1912
1913 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1914                                             enum pipe pipe, bool apply)
1915 {
1916         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1917         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1918
1919         if (apply)
1920                 val |= mask;
1921         else
1922                 val &= ~mask;
1923
1924         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1925 }
1926
1927 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
1928 {
1929         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1930         enum pipe pipe = crtc->pipe;
1931         u32 val;
1932
1933         /* Wa_22010947358:adl-p */
1934         if (IS_ALDERLAKE_P(dev_priv))
1935                 val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
1936         else
1937                 val = MBUS_DBOX_A_CREDIT(2);
1938
1939         if (DISPLAY_VER(dev_priv) >= 12) {
1940                 val |= MBUS_DBOX_BW_CREDIT(2);
1941                 val |= MBUS_DBOX_B_CREDIT(12);
1942         } else {
1943                 val |= MBUS_DBOX_BW_CREDIT(1);
1944                 val |= MBUS_DBOX_B_CREDIT(8);
1945         }
1946
1947         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
1948 }
1949
1950 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1951 {
1952         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1953         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1954
1955         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1956                        HSW_LINETIME(crtc_state->linetime) |
1957                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
1958 }
1959
1960 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1961 {
1962         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1963         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1964         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1965         u32 val;
1966
1967         val = intel_de_read(dev_priv, reg);
1968         val &= ~HSW_FRAME_START_DELAY_MASK;
1969         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
1970         intel_de_write(dev_priv, reg, val);
1971 }
1972
1973 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1974                                          const struct intel_crtc_state *crtc_state)
1975 {
1976         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1977         struct intel_crtc_state *master_crtc_state;
1978         struct intel_crtc *master_crtc;
1979         struct drm_connector_state *conn_state;
1980         struct drm_connector *conn;
1981         struct intel_encoder *encoder = NULL;
1982         int i;
1983
1984         master_crtc = intel_master_crtc(crtc_state);
1985         master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
1986
1987         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1988                 if (conn_state->crtc != &master_crtc->base)
1989                         continue;
1990
1991                 encoder = to_intel_encoder(conn_state->best_encoder);
1992                 break;
1993         }
1994
1995         /*
1996          * Enable sequence steps 1-7 on bigjoiner master
1997          */
1998         if (crtc_state->bigjoiner_slave)
1999                 intel_encoders_pre_pll_enable(state, master_crtc);
2000
2001         if (crtc_state->shared_dpll)
2002                 intel_enable_shared_dpll(crtc_state);
2003
2004         if (crtc_state->bigjoiner_slave)
2005                 intel_encoders_pre_enable(state, master_crtc);
2006
2007         /* need to enable VDSC, which we skipped in pre-enable */
2008         intel_dsc_enable(crtc_state);
2009
2010         if (DISPLAY_VER(dev_priv) >= 13)
2011                 intel_uncompressed_joiner_enable(crtc_state);
2012 }
2013
/*
 * hsw_crtc_enable - pipe enable sequence for HSW+ (DDI) hardware
 * @state: the atomic state being committed
 * @crtc: the CRTC to enable
 *
 * NOTE(review): the statement order below follows the hardware enable
 * sequence; do not reorder casually.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Nothing to do if the pipe is already up. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		/* Bigjoiner handles the master/slave pre-enable ordering. */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder programming is skipped for bigjoiner slaves and DSI. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);

		hsw_set_transconf(new_crtc_state);
	}

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11) {
		const struct intel_dbuf_state *dbuf_state =
				intel_atomic_get_new_dbuf_state(state);

		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
	}

	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* Workaround active only for one vblank after enabling. */
	if (psl_clkgate_wa) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}
2117
/*
 * ilk_pfit_disable - disable the panel fitter of @old_crtc_state's pipe
 *
 * Clears PF_CTL first, then zeroes the window position and size
 * registers. Skips the writes entirely when the fitter wasn't in use.
 */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
2133
/*
 * ilk_crtc_disable - pipe disable sequence for ILK/SNB/IVB style hardware
 * @state: the atomic state being committed
 * @crtc: the CRTC to disable
 *
 * NOTE(review): the statement order mirrors the enable sequence in
 * reverse (encoders, vblank, transcoder, pfit, PCH) and must not be
 * reordered casually.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	/* Re-arm underrun reporting once everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
2169
2170 static void hsw_crtc_disable(struct intel_atomic_state *state,
2171                              struct intel_crtc *crtc)
2172 {
2173         const struct intel_crtc_state *old_crtc_state =
2174                 intel_atomic_get_old_crtc_state(state, crtc);
2175
2176         /*
2177          * FIXME collapse everything to one hook.
2178          * Need care with mst->ddi interactions.
2179          */
2180         if (!old_crtc_state->bigjoiner_slave) {
2181                 intel_encoders_disable(state, crtc);
2182                 intel_encoders_post_disable(state, crtc);
2183         }
2184 }
2185
/*
 * i9xx_pfit_enable - enable the GMCH panel fitter for @crtc_state's pipe
 *
 * Programs the panel fitter ratios and control register, asserting first
 * that the fitter is currently off and the transcoder is disabled, since
 * the hardware only allows reprogramming while the pipe is down.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* No fitter programming requested for this pipe. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be written before the control (enable) register. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
2210
2211 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2212 {
2213         if (phy == PHY_NONE)
2214                 return false;
2215         else if (IS_DG2(dev_priv))
2216                 /*
2217                  * DG2 outputs labelled as "combo PHY" in the bspec use
2218                  * SNPS PHYs with completely different programming,
2219                  * hence we always return false here.
2220                  */
2221                 return false;
2222         else if (IS_ALDERLAKE_S(dev_priv))
2223                 return phy <= PHY_E;
2224         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2225                 return phy <= PHY_D;
2226         else if (IS_JSL_EHL(dev_priv))
2227                 return phy <= PHY_C;
2228         else if (DISPLAY_VER(dev_priv) >= 11)
2229                 return phy <= PHY_B;
2230         else
2231                 return false;
2232 }
2233
2234 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2235 {
2236         if (IS_DG2(dev_priv))
2237                 /* DG2's "TC1" output uses a SNPS PHY */
2238                 return false;
2239         else if (IS_ALDERLAKE_P(dev_priv))
2240                 return phy >= PHY_F && phy <= PHY_I;
2241         else if (IS_TIGERLAKE(dev_priv))
2242                 return phy >= PHY_D && phy <= PHY_I;
2243         else if (IS_ICELAKE(dev_priv))
2244                 return phy >= PHY_C && phy <= PHY_F;
2245         else
2246                 return false;
2247 }
2248
2249 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2250 {
2251         if (phy == PHY_NONE)
2252                 return false;
2253         else if (IS_DG2(dev_priv))
2254                 /*
2255                  * All four "combo" ports and the TC1 port (PHY E) use
2256                  * Synopsis PHYs.
2257                  */
2258                 return phy <= PHY_E;
2259
2260         return false;
2261 }
2262
2263 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2264 {
2265         if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2266                 return PHY_D + port - PORT_D_XELPD;
2267         else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2268                 return PHY_F + port - PORT_TC1;
2269         else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2270                 return PHY_B + port - PORT_TC1;
2271         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2272                 return PHY_C + port - PORT_TC1;
2273         else if (IS_JSL_EHL(i915) && port == PORT_D)
2274                 return PHY_A;
2275
2276         return PHY_A + port - PORT_A;
2277 }
2278
2279 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2280 {
2281         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2282                 return TC_PORT_NONE;
2283
2284         if (DISPLAY_VER(dev_priv) >= 12)
2285                 return TC_PORT_1 + port - PORT_TC1;
2286         else
2287                 return TC_PORT_1 + port - PORT_C;
2288 }
2289
/*
 * intel_port_to_power_domain - map a DDI port to its lanes power domain
 *
 * Unknown ports are flagged via MISSING_CASE() and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
2316
/*
 * intel_aux_power_domain - map a digital port's AUX channel to a power domain
 *
 * Ports in Thunderbolt alt mode use the *_TBT domains; everything else
 * falls through to the legacy mapping in
 * intel_legacy_aux_to_power_domain().
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			/* Arbitrary fallback after flagging the bad channel. */
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
2344
/*
 * Convert an AUX channel to its legacy (non-TBT) power domain. This
 * ignores Thunderbolt alt mode; for a port that may be in TBT mode use
 * intel_aux_power_domain() instead. An unknown channel is flagged with
 * MISSING_CASE() and falls back to POWER_DOMAIN_AUX_A.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
2376
/*
 * Compute the bitmask of display power domains @crtc_state needs while
 * active: pipe and transcoder domains, the panel fitter (when enabled or
 * when force_thru is set), each driven encoder's domain, audio MMIO on
 * DDI platforms, the display core for a shared DPLL, and the DSC domain
 * when compression is enabled. Returns 0 for an inactive CRTC.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;

	/* An inactive CRTC requires no power domains at all. */
	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Every encoder driven by this CRTC contributes its own domain. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));

	return mask;
}
2413
/*
 * Acquire the power domains newly required by @crtc_state (recording
 * them in crtc->enabled_power_domains) and return the mask of domains
 * that are no longer needed. The caller is expected to drop the
 * returned domains later via modeset_put_crtc_power_domains(), i.e.
 * after the modeset — presumably so the old domains stay powered
 * across the transition.
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	domains = get_crtc_power_domains(crtc_state);

	/* Domains needed now but not yet held / held but no longer needed. */
	new_domains = domains & ~crtc->enabled_power_domains.mask;
	old_domains = crtc->enabled_power_domains.mask & ~domains;

	for_each_power_domain(domain, new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);

	return old_domains;
}
2434
2435 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2436                                            u64 domains)
2437 {
2438         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2439                                             &crtc->enabled_power_domains,
2440                                             domains);
2441 }
2442
/*
 * Enable a CRTC on VLV/CHV following the required ordering: timings and
 * pipe config first, then PLL (CHV or VLV variant), encoder hooks,
 * panel fitter, color management, watermarks, and finally the
 * transcoder, vblank, and encoder enable.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B needs its blender set to legacy mode. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
2494
/*
 * Enable a CRTC on pre-VLV GMCH platforms. Same general ordering as the
 * VLV/CHV path but with the i9xx PLL, no pre-PLL encoder hook, and
 * gen2-specific quirks (no FIFO underrun reporting, plus a vblank wait
 * at the end to avoid spurious underruns).
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* Fall back to a global watermark update if there's no per-crtc hook. */
	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}
2542
/*
 * Disable the GMCH panel fitter, if the old state had it enabled.
 * The transcoder must already be disabled at this point (asserted).
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the pfit was never enabled. */
	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
2557
/*
 * Disable a CRTC on GMCH platforms (i9xx/VLV/CHV), in the reverse order
 * of the enable path: encoders, vblank, transcoder, pfit, then the PLL
 * (skipped for DSI, whose clock presumably isn't driven by these PLLs —
 * see the INTEL_OUTPUT_DSI check).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no FIFO underrun reporting. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->wm_disp->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
2604
/*
 * Forcibly disable a CRTC outside of a regular atomic commit (used
 * during hw state sanitization): turn off its visible planes, run the
 * platform crtc_disable hook via a throwaway atomic state, then clear
 * all of the driver's bookkeeping (crtc state, encoder links, FBC,
 * watermarks, DPLL, power domains, cdclk/dbuf/bw state) for this pipe.
 * All relevant locks must already be held via @ctx.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	/* Already off — nothing to do. */
	if (!crtc_state->hw.active)
		return;

	/* Planes must go down before the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * The crtc_disable hook needs an atomic state; build a minimal
	 * throwaway one just for this call.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Wipe the uapi and hw halves of the crtc state. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach all encoders that pointed at this crtc. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(dev_priv);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* This pipe no longer contributes to global cdclk/dbuf/bw state. */
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
2688
2689 /*
2690  * turn all crtc's off, but do not adjust state
2691  * This has to be paired with a call to intel_modeset_setup_hw_state.
2692  */
2693 int intel_display_suspend(struct drm_device *dev)
2694 {
2695         struct drm_i915_private *dev_priv = to_i915(dev);
2696         struct drm_atomic_state *state;
2697         int ret;
2698
2699         if (!HAS_DISPLAY(dev_priv))
2700                 return 0;
2701
2702         state = drm_atomic_helper_suspend(dev);
2703         ret = PTR_ERR_OR_ZERO(state);
2704         if (ret)
2705                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2706                         ret);
2707         else
2708                 dev_priv->modeset_restore_state = state;
2709         return ret;
2710 }
2711
/*
 * Default encoder destroy hook: clean up the base drm_encoder and free
 * the containing intel_encoder.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
2719
/*
 * Cross check the actual hw state with our own modeset state tracking
 * (and its internal consistency): a connector whose hardware reports
 * enabled must have an active attached crtc and matching encoder links,
 * and a disabled one must not.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors don't have a 1:1 encoder link to verify. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
2758
/*
 * Check whether @crtc_state is even capable of IPS: the crtc must
 * support it (ULT, pipe A), the modparam must allow it, pipe bpp must
 * not exceed 24, and on BDW the pixel rate must leave headroom below
 * 95% of the max cdclk.
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* Respect the i915.enable_ips module parameter. */
	if (!dev_priv->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
2787
/*
 * Decide whether to enable IPS for @crtc_state during atomic check.
 * Starts from ips_enabled = false and only sets it true when the state
 * is IPS-capable, CRC capture is off, at least one non-cursor plane is
 * enabled, and (on BDW) the logical cdclk leaves 95% headroom.
 * Returns 0 on success or a negative error from the cdclk state lookup.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
2829
2830 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2831 {
2832         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2833
2834         /* GDG double wide on either pipe, otherwise pipe A only */
2835         return DISPLAY_VER(dev_priv) < 4 &&
2836                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2837 }
2838
/*
 * Return the effective pipe pixel rate, scaled up for panel fitter
 * downscaling: with the pch pfit enabled the rate is adjusted by the
 * ratio of the pipe source size to the pfit destination window.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	/* Source rect in 16.16 fixed point, as intel_adjusted_rate() expects. */
	drm_rect_init(&src, 0, 0,
		      crtc_state->pipe_src_w << 16,
		      crtc_state->pipe_src_h << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}
2859
/*
 * Populate @mode's user-visible timing fields from the crtc_* (hardware)
 * timing fields of @timings, mark it as a driver-generated mode, and
 * regenerate its name. @mode and @timings may alias the same structure.
 */
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	/* Name is derived from hdisplay/vdisplay, so set it last. */
	drm_mode_set_name(mode);
}
2880
2881 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2882 {
2883         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2884
2885         if (HAS_GMCH(dev_priv))
2886                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2887                 crtc_state->pixel_rate =
2888                         crtc_state->hw.pipe_mode.crtc_clock;
2889         else
2890                 crtc_state->pixel_rate =
2891                         ilk_pipe_pixel_rate(crtc_state);
2892 }
2893
/*
 * Derive the non-hardware parts of @crtc_state after hw readout:
 * construct hw.pipe_mode from hw.adjusted_mode (halved for bigjoiner,
 * widened for eDP MSO splitter), recompute the pixel rate, and rebuild
 * hw.mode from the adjusted mode with the full joined source size.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		/* With MSO the adjusted mode shows the full (joined) timings. */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	/* hw.mode advertises the full source size across joined pipes. */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
2947
/*
 * Read out the encoder's hardware configuration into @crtc_state, then
 * fill in the state derived from it (pipe_mode, pixel rate, hw.mode).
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
2955
/*
 * Compute and validate the pipe-level configuration: build hw.pipe_mode
 * from the adjusted mode (halved for bigjoiner, widened for the eDP MSO
 * splitter), check the dot clock against the platform limit (possibly
 * enabling double wide mode on gen2/3), reject odd source widths where
 * the hardware can't handle them, compute the pixel rate, and finally
 * run the FDI computation for PCH encoders. Returns 0 or -EINVAL.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	/* eDP MSO: scale segment timings up to the full mode. */
	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
3042
3043 static void
3044 intel_reduce_m_n_ratio(u32 *num, u32 *den)
3045 {
3046         while (*num > DATA_LINK_M_N_MASK ||
3047                *den > DATA_LINK_M_N_MASK) {
3048                 *num >>= 1;
3049                 *den >>= 1;
3050         }
3051 }
3052
/*
 * Compute a DP link M/N pair for the ratio m/n. N is either the fixed
 * DP_LINK_CONSTANT_N_VALUE (0x8000, for fussy dongles) or the next
 * power of two of @n capped at DATA_LINK_N_MAX; M is then derived with
 * 64-bit math to avoid overflow, and both are shifted down to fit the
 * register fields.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = DP_LINK_CONSTANT_N_VALUE;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* M = m * N / n, computed in 64 bits to avoid intermediate overflow. */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
3072
/*
 * Fill in @m_n with the data (GMCH) and link M/N values for a DP link:
 * data M/N is the ratio of the stream data rate (bpp * pixel clock,
 * scaled up when FEC is enabled) to the total link bandwidth
 * (link_clock * nlanes * 8), link M/N is pixel clock over link clock.
 * TU size is fixed at 64. @constant_n selects the fixed 0x8000 N value.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	/* FEC adds overhead, so the effective data rate goes up. */
	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
3094
/*
 * On IBX/CPT PCH platforms, override the VBT's LVDS SSC setting with
 * what the BIOS actually programmed into PCH_DREF_CONTROL, logging the
 * discrepancy when they disagree.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
3117
3118 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
3119                                          const struct intel_link_m_n *m_n)
3120 {
3121         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3122         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3123         enum pipe pipe = crtc->pipe;
3124
3125         intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
3126                        TU_SIZE(m_n->tu) | m_n->gmch_m);
3127         intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
3128         intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
3129         intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
3130 }
3131
3132 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
3133                                  enum transcoder transcoder)
3134 {
3135         if (IS_HASWELL(dev_priv))
3136                 return transcoder == TRANSCODER_EDP;
3137
3138         /*
3139          * Strictly speaking some registers are available before
3140          * gen7, but we only support DRRS on gen7+
3141          */
3142         return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
3143 }
3144
/*
 * Program the CPU transcoder data/link M/N dividers.
 *
 * ilk+ index the M/N registers by transcoder, whereas g4x indexes
 * them by pipe. @m2_n2 may be NULL; it is only written when DRRS is
 * enabled and the transcoder actually has the M2/N2 registers
 * (see transcoder_has_m2_n2()).
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 5) {
		/* TU size and the data M value share the DATA_M1 register. */
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* g4x: the M/N registers are per-pipe rather than per-transcoder. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
3186
3187 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
3188 {
3189         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
3190         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3191
3192         if (m_n == M1_N1) {
3193                 dp_m_n = &crtc_state->dp_m_n;
3194                 dp_m2_n2 = &crtc_state->dp_m2_n2;
3195         } else if (m_n == M2_N2) {
3196
3197                 /*
3198                  * M2_N2 registers are not supported. Hence m2_n2 divider value
3199                  * needs to be programmed into M1_N1.
3200                  */
3201                 dp_m_n = &crtc_state->dp_m2_n2;
3202         } else {
3203                 drm_err(&i915->drm, "Unsupported divider value\n");
3204                 return;
3205         }
3206
3207         if (crtc_state->has_pch_encoder)
3208                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
3209         else
3210                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
3211 }
3212
/*
 * Program the CPU transcoder display timing registers (H/V total,
 * blank, sync) from the crtc's adjusted mode. All registers store
 * <value> - 1 in two 16-bit halves.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a different vsync shift than other outputs */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Wrap a negative shift back into the scanline */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT doesn't exist on gen2/3 */
	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
3270
3271 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
3272 {
3273         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3274         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3275         enum pipe pipe = crtc->pipe;
3276
3277         /* pipesrc controls the size that is scaled from, which should
3278          * always be the user's requested size.
3279          */
3280         intel_de_write(dev_priv, PIPESRC(pipe),
3281                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
3282 }
3283
3284 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3285 {
3286         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3287         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3288
3289         if (DISPLAY_VER(dev_priv) == 2)
3290                 return false;
3291
3292         if (DISPLAY_VER(dev_priv) >= 9 ||
3293             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3294                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3295         else
3296                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
3297 }
3298
3299 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
3300                                          struct intel_crtc_state *pipe_config)
3301 {
3302         struct drm_device *dev = crtc->base.dev;
3303         struct drm_i915_private *dev_priv = to_i915(dev);
3304         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
3305         u32 tmp;
3306
3307         tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
3308         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
3309         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
3310
3311         if (!transcoder_is_dsi(cpu_transcoder)) {
3312                 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
3313                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
3314                                                         (tmp & 0xffff) + 1;
3315                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
3316                                                 ((tmp >> 16) & 0xffff) + 1;
3317         }
3318         tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
3319         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
3320         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
3321
3322         tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
3323         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
3324         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
3325
3326         if (!transcoder_is_dsi(cpu_transcoder)) {
3327                 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
3328                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
3329                                                         (tmp & 0xffff) + 1;
3330                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
3331                                                 ((tmp >> 16) & 0xffff) + 1;
3332         }
3333         tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
3334         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
3335         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
3336
3337         if (intel_pipe_is_interlaced(pipe_config)) {
3338                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
3339                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
3340                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
3341         }
3342 }
3343
3344 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3345                                     struct intel_crtc_state *pipe_config)
3346 {
3347         struct drm_device *dev = crtc->base.dev;
3348         struct drm_i915_private *dev_priv = to_i915(dev);
3349         u32 tmp;
3350
3351         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3352         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
3353         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
3354 }
3355
/*
 * Assemble and program PIPECONF for gen2-4/VLV/CHV pipes:
 * double wide, dither, bpc, interlace, color range, gamma mode
 * and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen2/3 and SDVO outputs need the field indication variant */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* Limited (16-235) color range, VLV/CHV only */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	/* Posting read to make sure the write has landed. */
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
3416
3417 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3418 {
3419         if (IS_I830(dev_priv))
3420                 return false;
3421
3422         return DISPLAY_VER(dev_priv) >= 4 ||
3423                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3424 }
3425
/*
 * Read back the GMCH panel fitter state, but only if the single
 * shared pfit is actually attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		/* pre-gen4: the pfit is hardwired to pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* gen4: PFIT_CONTROL has a pipe select field */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
3452
/*
 * Read back the port clock of a VLV pipe by decoding the DPLL
 * divider fields from the DPIO sideband registers.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* VLV DPLL reference clock, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO registers are only reachable through the sideband interface */
	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the m1/m2/n/p1/p2 divider fields from PLL_DW3 */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
3479
/*
 * Read back the port clock of a CHV pipe by decoding the DPLL
 * divider fields from the per-channel DPIO sideband registers.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* CHV DPLL reference clock, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO registers are only reachable through the sideband interface */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 is a 22.22 fixed point value; the fractional part is optional */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
3513
3514 static enum intel_output_format
3515 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3516 {
3517         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3518         u32 tmp;
3519
3520         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3521
3522         if (tmp & PIPEMISC_YUV420_ENABLE) {
3523                 /* We support 4:2:0 in full blend mode only */
3524                 drm_WARN_ON(&dev_priv->drm,
3525                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3526
3527                 return INTEL_OUTPUT_FORMAT_YCBCR420;
3528         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3529                 return INTEL_OUTPUT_FORMAT_YCBCR444;
3530         } else {
3531                 return INTEL_OUTPUT_FORMAT_RGB;
3532         }
3533 }
3534
3535 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3536 {
3537         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3538         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3539         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3540         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3541         u32 tmp;
3542
3543         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3544
3545         if (tmp & DISP_PIPE_GAMMA_ENABLE)
3546                 crtc_state->gamma_enable = true;
3547
3548         if (!HAS_GMCH(dev_priv) &&
3549             tmp & DISP_PIPE_CSC_ENABLE)
3550                 crtc_state->csc_enable = true;
3551 }
3552
/*
 * Read out the full hw state of a gen2-4/VLV/CHV pipe into
 * @pipe_config.
 *
 * Returns true if the pipe is powered and enabled, false otherwise.
 * Holds a pipe power domain reference for the duration of the
 * register readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Bail early if the pipe's power domain is off; no registers to read. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* These platforms have a fixed 1:1 pipe:transcoder mapping. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x and later have the bpc fields in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3674
/*
 * Assemble and program PIPECONF for ILK-style (PCH) pipes:
 * bpc, dither, interlace, color range, colorspace, gamma mode
 * and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* SDVO handles the limited range in the encoder instead */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	/* Posting read to make sure the write has landed. */
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
3731
3732 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3733 {
3734         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3735         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3736         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3737         u32 val = 0;
3738
3739         if (IS_HASWELL(dev_priv) && crtc_state->dither)
3740                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3741
3742         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3743                 val |= PIPECONF_INTERLACED_ILK;
3744         else
3745                 val |= PIPECONF_PROGRESSIVE;
3746
3747         if (IS_HASWELL(dev_priv) &&
3748             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3749                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3750
3751         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3752         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
3753 }
3754
/*
 * Assemble and program PIPEMISC on BDW+: bpc, dither, output
 * colorspace (incl. YCbCr 4:2:0), HDR precision, pixel rounding,
 * plus the ADL-P underrun bubble counter tuning.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;
	int i;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_10_BPC;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_12_BPC_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* Both YCbCr 4:4:4 and 4:2:0 set the generic YUV colorspace bit. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 is only supported in full blend mode (see readout WARN). */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	if (IS_ALDERLAKE_P(dev_priv)) {
		bool scaler_in_use = false;

		/* Any in-use scaler switches the bubble counter mode. */
		for (i = 0; i < crtc->num_scalers; i++) {
			if (!scaler_state->scalers[i].in_use)
				continue;

			scaler_in_use = true;
			break;
		}

		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
	}

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
3821
/*
 * Read back the pipe bpp from the PIPEMISC BPC field (BDW+).
 *
 * Returns the pipe bpp (total bits per pixel over all three channels),
 * or 0 (with a MISSING_CASE warning) if the field holds a value we
 * don't know how to decode.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_6_BPC:
		return 18;
	case PIPEMISC_8_BPC:
		return 24;
	case PIPEMISC_10_BPC:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_12_BPC_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
3855
3856 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3857 {
3858         /*
3859          * Account for spread spectrum to avoid
3860          * oversubscribing the link. Max center spread
3861          * is 2.5%; use 5% for safety's sake.
3862          */
3863         u32 bps = target_clock * bpp * 21 / 20;
3864         return DIV_ROUND_UP(bps, link_bw * 8);
3865 }
3866
/*
 * Read out the link M/N and data M/N values programmed into the PCH
 * transcoder for this crtc's pipe.
 *
 * The TU size shares the DATA_M1 register with the data M value: it is
 * masked out of m_n->gmch_m and decoded separately into m_n->tu (the
 * register field stores the value minus one, hence the +1).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
3882
/*
 * Read out link M/N and data M/N from the CPU transcoder.
 *
 * On ILK+ (display ver >= 5) the values live in per-transcoder
 * registers, and an alternate M2/N2 set is additionally read into
 * @m2_n2 when the caller provides it and the transcoder has one.
 * Older (G4X-style) hardware only has per-pipe M/N registers and no
 * second set.
 *
 * As in intel_pch_transcoder_get_m_n(), the TU size is carried in the
 * high bits of the DATA_M register: it is masked out of ->gmch_m and
 * decoded into ->tu (stored minus one, hence the +1).
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
3927
3928 void intel_dp_get_m_n(struct intel_crtc *crtc,
3929                       struct intel_crtc_state *pipe_config)
3930 {
3931         if (pipe_config->has_pch_encoder)
3932                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
3933         else
3934                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3935                                              &pipe_config->dp_m_n,
3936                                              &pipe_config->dp_m2_n2);
3937 }
3938
/*
 * Read out the FDI link M/N values from the crtc's cpu transcoder
 * (no second M2/N2 set is read out for FDI, hence the NULL).
 */
void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
			    struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
3945
3946 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3947                                   u32 pos, u32 size)
3948 {
3949         drm_rect_init(&crtc_state->pch_pfit.dst,
3950                       pos >> 16, pos & 0xffff,
3951                       size >> 16, size & 0xffff);
3952 }
3953
/*
 * Read out the SKL+ panel fitter state: find the scaler (if any) that
 * is enabled and attached to the pipe itself (not to a plane), record
 * its destination window in pch_pfit, and update the crtc scaler
 * bookkeeping (scaler_id, in_use, scaler_users).
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* skip scalers that are disabled or bound to a plane */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
3988
/*
 * Read out the ILK-style panel fitter state for this pipe into
 * crtc_state->pch_pfit, if the fitter is enabled.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignements of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
4014
/*
 * Read out the hw state of an ILK-style (PCH based) pipe into
 * @pipe_config.
 *
 * Returns true if the pipe is enabled and the state was read out,
 * false if the pipe or its power well is disabled. The power well
 * reference is held only for the duration of the readout.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* cpu transcoder is mapped 1:1 to the pipe here */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* decode pipe bpp from the PIPECONF BPC field */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	/* not read out from hw; presumably always 1 here — TODO confirm */
	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4093
4094 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
4095 {
4096         if (DISPLAY_VER(i915) >= 12)
4097                 return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
4098         else if (DISPLAY_VER(i915) >= 11)
4099                 return BIT(PIPE_B) | BIT(PIPE_C);
4100         else
4101                 return 0;
4102 }
4103
/*
 * Check whether @cpu_transcoder's DDI function is enabled.
 *
 * The register is read only while the transcoder's power domain is
 * enabled; if the power domain is off, tmp stays 0 and the transcoder
 * is reported as disabled.
 */
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}
4118
/*
 * Determine which pipes are currently enabled as bigjoiner (or, on
 * display ver 13+, uncompressed joiner) slaves.
 *
 * Master/slave bits are collected from ICL_PIPE_DSS_CTL1 for every
 * bigjoiner-capable pipe; a WARN fires if the masks do not form
 * consecutive master+slave pairs. Only the slave mask is returned.
 */
static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
{
	u8 master_pipes = 0, slave_pipes = 0;
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
			continue;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			/*
			 * NOTE(review): this 'continue' relies on the
			 * with_intel_display_power_if_enabled() loop header
			 * releasing the power reference on its way out —
			 * confirm against the macro definition.
			 */
			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				master_pipes |= BIT(pipe);
			else
				slave_pipes |= BIT(pipe);
		}

		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		/* ver 13+: also check the uncompressed joiner bits */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 master_pipes, slave_pipes);

	return slave_pipes;
}
4166
4167 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
4168 {
4169         u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4170
4171         if (DISPLAY_VER(i915) >= 11)
4172                 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4173
4174         return panel_transcoder_mask;
4175 }
4176
/*
 * Compute the bitmask of cpu transcoders currently driving this
 * crtc's pipe. Three sources are checked:
 *  - the panel transcoders (eDP, and DSI on gen11+), by decoding
 *    which pipe their DDI function input select points at,
 *  - the pipe's own transcoder (single pipe or bigjoiner master),
 *  - for a bigjoiner slave, the master pipe's transcoder.
 * The result is sanity-checked by assert_enabled_transcoders().
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/* which pipe is this panel transcoder routed to? */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder) crtc->pipe - 1;
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}
4242
4243 static bool has_edp_transcoders(u8 enabled_transcoders)
4244 {
4245         return enabled_transcoders & BIT(TRANSCODER_EDP);
4246 }
4247
4248 static bool has_dsi_transcoders(u8 enabled_transcoders)
4249 {
4250         return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4251                                       BIT(TRANSCODER_DSI_1));
4252 }
4253
4254 static bool has_pipe_transcoders(u8 enabled_transcoders)
4255 {
4256         return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4257                                        BIT(TRANSCODER_DSI_0) |
4258                                        BIT(TRANSCODER_DSI_1));
4259 }
4260
/*
 * Sanity checks on the set of transcoders found enabled for a single
 * pipe: mixing transcoder kinds (eDP vs DSI vs regular pipe
 * transcoders) is invalid, and only DSI transcoders may be enabled in
 * groups (ganged) — anything else must be a single transcoder.
 */
static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}
4275
/*
 * Find the cpu transcoder feeding this crtc and read out its basic
 * enable state.
 *
 * Takes a power reference for the chosen transcoder into
 * @power_domain_set (the caller is responsible for releasing the set)
 * and returns whether the transcoder's pipe is actually enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		/*
		 * NOTE(review): the A_ONOFF input select is recorded as a
		 * forced panel fitter pass-through — confirm the exact
		 * force_thru semantics against the users of this flag.
		 */
		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
4313
/*
 * Look for a BXT/GLK DSI transcoder driving this crtc and, if found,
 * record it as @pipe_config->cpu_transcoder (taking the transcoder's
 * power reference into @power_domain_set).
 *
 * Returns true iff a DSI transcoder is attached to the pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* the port must be routed to this crtc's pipe */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
4359
/*
 * Read out the full hw state of a HSW+ pipe into @pipe_config.
 *
 * Returns true if the pipe is active. All power references collected
 * into power_domain_set during readout are dropped before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* a DDI transcoder and a DSI transcoder must never both be active */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSC/joiner readout is skipped when the transcoder is disabled */
	intel_dsc_get_config(pipe_config);
	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
		intel_uncompressed_joiner_get_config(pipe_config);

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* pfit state is only read out if its power well is on */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	/* PIPE_MULT stores multiplier - 1; only exists for real pipe transcoders */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
4473
4474 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
4475 {
4476         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4477         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4478
4479         if (!i915->display->get_pipe_config(crtc, crtc_state))
4480                 return false;
4481
4482         crtc_state->hw.active = true;
4483
4484         intel_crtc_readout_derived_state(crtc_state);
4485
4486         return true;
4487 }
4488
/* VESA 640x480x72Hz mode (31.5 MHz dot clock) to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
4494
/*
 * Add all planes of @crtc to the atomic @state and detach them from
 * the crtc and their framebuffers, so that committing @state disables
 * every plane on the crtc.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
4519
/*
 * intel_get_load_detect_pipe - force a pipe on for load detection
 * @connector: connector to probe
 * @old: storage for the state needed to undo the forced modeset
 * @ctx: modeset acquire context
 *
 * Force a crtc on for @connector so that load detection can be performed.
 * A duplicate of the affected pre-existing atomic state is stashed in
 * @old->restore_state so intel_release_load_detect_pipe() can restore it.
 *
 * Returns true on success, false on failure, or -EDEADLK when the caller
 * must drop its locks and retry the whole operation.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct intel_crtc *possible_crtc;
        struct intel_crtc *crtc = NULL;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret;

        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.base.id, encoder->base.name);

        old->restore_state = NULL;

        drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = to_intel_crtc(connector->state->crtc);

                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_intel_crtc(dev, possible_crtc) {
                /* Skip crtcs this encoder cannot be routed to. */
                if (!(encoder->base.possible_crtcs &
                      drm_crtc_mask(&possible_crtc->base)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* Already in use; drop the lock and keep looking. */
                if (possible_crtc->base.state->enable) {
                        drm_modeset_unlock(&possible_crtc->base.mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        /* One state to force the mode, one to restore the old state later. */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->uapi.active = true;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
                                           &load_detect_mode);
        if (ret)
                goto fail;

        /* Load detection only needs the pipe, not any of the planes. */
        ret = intel_modeset_disable_planes(state, &crtc->base);
        if (ret)
                goto fail;

        /* Duplicate the affected connector/crtc/plane state for the restore. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Failed to create a copy of old state to restore: %i\n",
                            ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_crtc_wait_for_next_vblank(crtc);

        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* -EDEADLK must be propagated so the caller can back off and retry. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
4675
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @connector: connector that was probed
 * @old: state saved by intel_get_load_detect_pipe()
 * @ctx: modeset acquire context
 *
 * Commit the duplicated pre-load-detect state stored in @old->restore_state,
 * returning the pipe to its previous configuration. A no-op if no restore
 * state was saved.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old,
                                    struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_atomic_state *state = old->restore_state;
        int ret;

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.id, encoder->name);

        /* Nothing was saved, so there is nothing to restore. */
        if (!state)
                return;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
        if (ret)
                drm_dbg_kms(&i915->drm,
                            "Couldn't release load detect pipe: %i\n", ret);
        drm_atomic_state_put(state);
}
4700
4701 static int i9xx_pll_refclk(struct drm_device *dev,
4702                            const struct intel_crtc_state *pipe_config)
4703 {
4704         struct drm_i915_private *dev_priv = to_i915(dev);
4705         u32 dpll = pipe_config->dpll_hw_state.dpll;
4706
4707         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4708                 return dev_priv->vbt.lvds_ssc_freq;
4709         else if (HAS_PCH_SPLIT(dev_priv))
4710                 return 120000;
4711         else if (DISPLAY_VER(dev_priv) != 2)
4712                 return 96000;
4713         else
4714                 return 48000;
4715 }
4716
/*
 * Returns the clock of the currently programmed mode of the given pipe,
 * reconstructed from the readout DPLL/FP register values stashed in
 * @pipe_config->dpll_hw_state. The result is stored in
 * @pipe_config->port_clock (includes the pixel multiplier).
 */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                         struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick whichever FP register the DPLL is currently using. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        /* Extract M1/N/M2 dividers; Pineview uses its own field layout. */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (DISPLAY_VER(dev_priv) != 2) {
                /* P1 is stored as a one-hot bitfield, hence the ffs(). */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode. */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        /* Unrecognized mode: leave port_clock untouched. */
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unknown DPLL mode %08x in programmed "
                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* DISPLAY_VER == 2: dividers are encoded differently again. */
                enum pipe lvds_pipe;

                if (IS_I85X(dev_priv) &&
                    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
                    lvds_pipe == crtc->pipe) {
                        u32 lvds = intel_de_read(dev_priv, LVDS);

                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        /* P2 follows the power state of the second LVDS channel. */
                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
4809
4810 int intel_dotclock_calculate(int link_freq,
4811                              const struct intel_link_m_n *m_n)
4812 {
4813         /*
4814          * The calculation for the data clock is:
4815          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4816          * But we want to avoid losing precison if possible, so:
4817          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4818          *
4819          * and the link clock is simpler:
4820          * link_clock = (m * link_clock) / n
4821          */
4822
4823         if (!m_n->link_n)
4824                 return 0;
4825
4826         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
4827 }
4828
4829 /* Returns the currently programmed mode of the given encoder. */
4830 struct drm_display_mode *
4831 intel_encoder_current_mode(struct intel_encoder *encoder)
4832 {
4833         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4834         struct intel_crtc_state *crtc_state;
4835         struct drm_display_mode *mode;
4836         struct intel_crtc *crtc;
4837         enum pipe pipe;
4838
4839         if (!encoder->get_hw_state(encoder, &pipe))
4840                 return NULL;
4841
4842         crtc = intel_crtc_for_pipe(dev_priv, pipe);
4843
4844         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4845         if (!mode)
4846                 return NULL;
4847
4848         crtc_state = intel_crtc_state_alloc(crtc);
4849         if (!crtc_state) {
4850                 kfree(mode);
4851                 return NULL;
4852         }
4853
4854         if (!intel_crtc_get_pipe_config(crtc_state)) {
4855                 kfree(crtc_state);
4856                 kfree(mode);
4857                 return NULL;
4858         }
4859
4860         intel_encoder_get_config(encoder, crtc_state);
4861
4862         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4863
4864         kfree(crtc_state);
4865
4866         return mode;
4867 }
4868
4869 /**
4870  * intel_wm_need_update - Check whether watermarks need updating
4871  * @cur: current plane state
4872  * @new: new plane state
4873  *
4874  * Check current plane state versus the new one to determine whether
4875  * watermarks need to be recalculated.
4876  *
4877  * Returns true or false.
4878  */
4879 static bool intel_wm_need_update(const struct intel_plane_state *cur,
4880                                  struct intel_plane_state *new)
4881 {
4882         /* Update watermarks on tiling or size changes. */
4883         if (new->uapi.visible != cur->uapi.visible)
4884                 return true;
4885
4886         if (!cur->hw.fb || !new->hw.fb)
4887                 return false;
4888
4889         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
4890             cur->hw.rotation != new->hw.rotation ||
4891             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
4892             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
4893             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
4894             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
4895                 return true;
4896
4897         return false;
4898 }
4899
4900 static bool needs_scaling(const struct intel_plane_state *state)
4901 {
4902         int src_w = drm_rect_width(&state->uapi.src) >> 16;
4903         int src_h = drm_rect_height(&state->uapi.src) >> 16;
4904         int dst_w = drm_rect_width(&state->uapi.dst);
4905         int dst_h = drm_rect_height(&state->uapi.dst);
4906
4907         return (src_w != dst_w || src_h != dst_h);
4908 }
4909
4910 static bool intel_plane_do_async_flip(struct intel_plane *plane,
4911                                       const struct intel_crtc_state *old_crtc_state,
4912                                       const struct intel_crtc_state *new_crtc_state)
4913 {
4914         struct drm_i915_private *i915 = to_i915(plane->base.dev);
4915
4916         if (!plane->async_flip)
4917                 return false;
4918
4919         if (!new_crtc_state->uapi.async_flip)
4920                 return false;
4921
4922         /*
4923          * In platforms after DISPLAY13, we might need to override
4924          * first async flip in order to change watermark levels
4925          * as part of optimization.
4926          * So for those, we are checking if this is a first async flip.
4927          * For platforms earlier than DISPLAY13 we always do async flip.
4928          */
4929         return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
4930 }
4931
/*
 * intel_plane_atomic_calc_changes - derive update flags for one plane
 * @old_crtc_state: previous crtc state
 * @new_crtc_state: crtc state being checked; derived flags are set here
 * @old_plane_state: previous plane state
 * @new_plane_state: plane state being checked
 *
 * Compare the old and new state of a single plane and set the derived
 * flags (watermark pre/post updates, cxsr/LP-WM disables, frontbuffer
 * bits, async-flip) on @new_crtc_state and @new_plane_state.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *new_crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct intel_plane_state *new_plane_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
        bool was_crtc_enabled = old_crtc_state->hw.active;
        bool is_crtc_enabled = new_crtc_state->hw.active;
        bool turn_off, turn_on, visible, was_visible;
        int ret;

        /* skl+: scaler assignment is handled per plane (except the cursor). */
        if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->uapi.visible;
        visible = new_plane_state->uapi.visible;

        /* A plane should never be visible on a crtc that was off. */
        if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                intel_plane_set_invisible(new_crtc_state, new_plane_state);
                visible = false;
        }

        /* Invisible before and after: nothing to do for this plane. */
        if (!was_visible && !visible)
                return 0;

        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        drm_dbg_atomic(&dev_priv->drm,
                       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                       crtc->base.base.id, crtc->base.name,
                       plane->base.base.id, plane->base.name,
                       was_visible, visible,
                       turn_off, turn_on, mode_changed);

        if (turn_on) {
                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
                        new_crtc_state->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        new_crtc_state->disable_cxsr = true;
        } else if (turn_off) {
                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
                        new_crtc_state->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        new_crtc_state->disable_cxsr = true;
        } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
                if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        new_crtc_state->update_wm_pre = true;
                        new_crtc_state->update_wm_post = true;
                }
        }

        if (visible || was_visible)
                new_crtc_state->fb_bits |= plane->frontbuffer_bit;

        /*
         * ILK/SNB DVSACNTR/Sprite Enable
         * IVB SPR_CTL/Sprite Enable
         * "When in Self Refresh Big FIFO mode, a write to enable the
         *  plane will be internally buffered and delayed while Big FIFO
         *  mode is exiting."
         *
         * Which means that enabling the sprite can take an extra frame
         * when we start in big FIFO mode (LP1+). Thus we need to drop
         * down to LP0 and wait for vblank in order to make sure the
         * sprite gets enabled on the next vblank after the register write.
         * Doing otherwise would risk enabling the sprite one frame after
         * we've already signalled flip completion. We can resume LP1+
         * once the sprite has been enabled.
         *
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         * IVB SPR_SCALE/Scaling Enable
         * "Low Power watermarks must be disabled for at least one
         *  frame before enabling sprite scaling, and kept disabled
         *  until sprite scaling is disabled."
         *
         * ILK/SNB DVSASCALE/Scaling Enable
         * "When in Self Refresh Big FIFO mode, scaling enable will be
         *  masked off while Big FIFO mode is exiting."
         *
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
         *
         * With experimental results seems this is needed also for primary
         * plane, not only sprite plane.
         */
        if (plane->id != PLANE_CURSOR &&
            (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(new_plane_state))))
                new_crtc_state->disable_lp_wm = true;

        if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
                new_plane_state->do_async_flip = true;

        return 0;
}
5056
5057 static bool encoders_cloneable(const struct intel_encoder *a,
5058                                const struct intel_encoder *b)
5059 {
5060         /* masks could be asymmetric, so check both ways */
5061         return a == b || (a->cloneable & (1 << b->type) &&
5062                           b->cloneable & (1 << a->type));
5063 }
5064
5065 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
5066                                          struct intel_crtc *crtc,
5067                                          struct intel_encoder *encoder)
5068 {
5069         struct intel_encoder *source_encoder;
5070         struct drm_connector *connector;
5071         struct drm_connector_state *connector_state;
5072         int i;
5073
5074         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5075                 if (connector_state->crtc != &crtc->base)
5076                         continue;
5077
5078                 source_encoder =
5079                         to_intel_encoder(connector_state->best_encoder);
5080                 if (!encoders_cloneable(encoder, source_encoder))
5081                         return false;
5082         }
5083
5084         return true;
5085 }
5086
5087 static int icl_add_linked_planes(struct intel_atomic_state *state)
5088 {
5089         struct intel_plane *plane, *linked;
5090         struct intel_plane_state *plane_state, *linked_plane_state;
5091         int i;
5092
5093         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5094                 linked = plane_state->planar_linked_plane;
5095
5096                 if (!linked)
5097                         continue;
5098
5099                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
5100                 if (IS_ERR(linked_plane_state))
5101                         return PTR_ERR(linked_plane_state);
5102
5103                 drm_WARN_ON(state->base.dev,
5104                             linked_plane_state->planar_linked_plane != plane);
5105                 drm_WARN_ON(state->base.dev,
5106                             linked_plane_state->planar_slave == plane_state->planar_slave);
5107         }
5108
5109         return 0;
5110 }
5111
/*
 * icl_check_nv12_planes - assign Y planes to planar (NV12) UV planes
 * @crtc_state: crtc state being checked
 *
 * On icl+ each NV12 plane needs a second plane to scan out the Y data.
 * Tear down any stale plane links, then pair every NV12 plane on this
 * crtc with a free Y-capable plane and copy the relevant parameters to
 * the slave.
 *
 * Returns 0 on success, -EINVAL if not enough Y planes are free, or a
 * negative error from acquiring plane states.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        /* Plane linking only exists on icl+. */
        if (DISPLAY_VER(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        crtc_state->enabled_planes &= ~BIT(plane->id);
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                }

                plane_state->planar_slave = false;
        }

        /* No NV12 planes on this crtc: nothing left to pair up. */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* Find the first Y-capable plane that is not already in use. */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(dev_priv, linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Need %d free Y planes for planar YUV\n",
                                    hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->enabled_planes |= BIT(linked->id);
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
                            linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->view = plane_state->view;
                linked_state->decrypt = plane_state->decrypt;

                intel_plane_copy_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                /* HDR planes select their Y partner via cus_ctl. */
                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
                        else if (linked->id == PLANE_SPRITE3)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
                        else if (linked->id == PLANE_SPRITE2)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
5210
5211 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
5212 {
5213         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5214         struct intel_atomic_state *state =
5215                 to_intel_atomic_state(new_crtc_state->uapi.state);
5216         const struct intel_crtc_state *old_crtc_state =
5217                 intel_atomic_get_old_crtc_state(state, crtc);
5218
5219         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
5220 }
5221
5222 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
5223 {
5224         const struct drm_display_mode *pipe_mode =
5225                 &crtc_state->hw.pipe_mode;
5226         int linetime_wm;
5227
5228         if (!crtc_state->hw.enable)
5229                 return 0;
5230
5231         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5232                                         pipe_mode->crtc_clock);
5233
5234         return min(linetime_wm, 0x1ff);
5235 }
5236
5237 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
5238                                const struct intel_cdclk_state *cdclk_state)
5239 {
5240         const struct drm_display_mode *pipe_mode =
5241                 &crtc_state->hw.pipe_mode;
5242         int linetime_wm;
5243
5244         if (!crtc_state->hw.enable)
5245                 return 0;
5246
5247         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5248                                         cdclk_state->logical.cdclk);
5249
5250         return min(linetime_wm, 0x1ff);
5251 }
5252
5253 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
5254 {
5255         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5256         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5257         const struct drm_display_mode *pipe_mode =
5258                 &crtc_state->hw.pipe_mode;
5259         int linetime_wm;
5260
5261         if (!crtc_state->hw.enable)
5262                 return 0;
5263
5264         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
5265                                    crtc_state->pixel_rate);
5266
5267         /* Display WA #1135: BXT:ALL GLK:ALL */
5268         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
5269             dev_priv->ipc_enabled)
5270                 linetime_wm /= 2;
5271
5272         return min(linetime_wm, 0x1ff);
5273 }
5274
5275 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
5276                                    struct intel_crtc *crtc)
5277 {
5278         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5279         struct intel_crtc_state *crtc_state =
5280                 intel_atomic_get_new_crtc_state(state, crtc);
5281         const struct intel_cdclk_state *cdclk_state;
5282
5283         if (DISPLAY_VER(dev_priv) >= 9)
5284                 crtc_state->linetime = skl_linetime_wm(crtc_state);
5285         else
5286                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
5287
5288         if (!hsw_crtc_supports_ips(crtc))
5289                 return 0;
5290
5291         cdclk_state = intel_atomic_get_cdclk_state(state);
5292         if (IS_ERR(cdclk_state))
5293                 return PTR_ERR(cdclk_state);
5294
5295         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
5296                                                        cdclk_state);
5297
5298         return 0;
5299 }
5300
/*
 * Validate and finish computing the new state for one crtc: clocks, color
 * management, watermarks, scalers, IPS, linetime and PSR2 selective fetch.
 * Returns 0 on success or a negative error code that aborts the commit.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool mode_changed = intel_crtc_needs_modeset(crtc_state);
        int ret;

        /*
         * On pre-ilk (except g4x) a crtc being turned off still needs a
         * post-commit watermark update to reflect the disabled planes.
         */
        if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
            mode_changed && !crtc_state->hw.active)
                crtc_state->update_wm_post = true;

        /*
         * Compute the pixel clock/PLL for a full modeset; a shared DPLL
         * must not already be assigned at this point (hence the WARN).
         */
        if (mode_changed && crtc_state->hw.enable &&
            !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
                ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
                if (ret)
                        return ret;
        }

        /*
         * May need to update pipe gamma enable bits
         * when C8 planes are getting enabled/disabled.
         */
        if (c8_planes_changed(crtc_state))
                crtc_state->uapi.color_mgmt_changed = true;

        if (mode_changed || crtc_state->update_pipe ||
            crtc_state->uapi.color_mgmt_changed) {
                ret = intel_color_check(crtc_state);
                if (ret)
                        return ret;
        }

        ret = intel_compute_pipe_wm(state, crtc);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Target pipe watermarks are invalid\n");
                return ret;
        }

        /*
         * Calculate 'intermediate' watermarks that satisfy both the
         * old state and the new state.  We can program these
         * immediately.
         */
        ret = intel_compute_intermediate_wm(state, crtc);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "No valid intermediate pipe watermarks are possible\n");
                return ret;
        }

        /* gen9+ pipes have programmable scalers that need setup/teardown. */
        if (DISPLAY_VER(dev_priv) >= 9) {
                if (mode_changed || crtc_state->update_pipe) {
                        ret = skl_update_scaler_crtc(crtc_state);
                        if (ret)
                                return ret;
                }

                ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
                if (ret)
                        return ret;
        }

        if (HAS_IPS(dev_priv)) {
                ret = hsw_compute_ips_config(crtc_state);
                if (ret)
                        return ret;
        }

        if (DISPLAY_VER(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                ret = hsw_compute_linetime_wm(state, crtc);
                if (ret)
                        return ret;

        }

        ret = intel_psr2_sel_fetch_update(state, crtc);
        if (ret)
                return ret;

        return 0;
}
5386
/*
 * Sync each connector's atomic state with the current (readout) hardware
 * links: best_encoder, crtc, max_bpc.  Used after hw state readout so the
 * atomic state matches reality.  The crtc reference held by the connector
 * state is dropped/re-taken as the link goes away/appears.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state = connector->base.state;
                struct intel_encoder *encoder =
                        to_intel_encoder(connector->base.encoder);

                /* Drop the reference held for the stale crtc link, if any. */
                if (conn_state->crtc)
                        drm_connector_put(&connector->base);

                if (encoder) {
                        struct intel_crtc *crtc =
                                to_intel_crtc(encoder->base.crtc);
                        const struct intel_crtc_state *crtc_state =
                                to_intel_crtc_state(crtc->base.state);

                        conn_state->best_encoder = &encoder->base;
                        conn_state->crtc = &crtc->base;
                        /* pipe_bpp of 0 (not yet known) is treated as 24bpp */
                        conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

                        /* New crtc link: take a fresh reference. */
                        drm_connector_get(&connector->base);
                } else {
                        conn_state->best_encoder = NULL;
                        conn_state->crtc = NULL;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
}
5419
5420 static int
5421 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
5422                       struct intel_crtc_state *pipe_config)
5423 {
5424         struct drm_connector *connector = conn_state->connector;
5425         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5426         const struct drm_display_info *info = &connector->display_info;
5427         int bpp;
5428
5429         switch (conn_state->max_bpc) {
5430         case 6 ... 7:
5431                 bpp = 6 * 3;
5432                 break;
5433         case 8 ... 9:
5434                 bpp = 8 * 3;
5435                 break;
5436         case 10 ... 11:
5437                 bpp = 10 * 3;
5438                 break;
5439         case 12 ... 16:
5440                 bpp = 12 * 3;
5441                 break;
5442         default:
5443                 MISSING_CASE(conn_state->max_bpc);
5444                 return -EINVAL;
5445         }
5446
5447         if (bpp < pipe_config->pipe_bpp) {
5448                 drm_dbg_kms(&i915->drm,
5449                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
5450                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
5451                             connector->base.id, connector->name,
5452                             bpp, 3 * info->bpc,
5453                             3 * conn_state->max_requested_bpc,
5454                             pipe_config->pipe_bpp);
5455
5456                 pipe_config->pipe_bpp = bpp;
5457         }
5458
5459         return 0;
5460 }
5461
5462 static int
5463 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5464                           struct intel_crtc_state *pipe_config)
5465 {
5466         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5467         struct drm_atomic_state *state = pipe_config->uapi.state;
5468         struct drm_connector *connector;
5469         struct drm_connector_state *connector_state;
5470         int bpp, i;
5471
5472         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5473             IS_CHERRYVIEW(dev_priv)))
5474                 bpp = 10*3;
5475         else if (DISPLAY_VER(dev_priv) >= 5)
5476                 bpp = 12*3;
5477         else
5478                 bpp = 8*3;
5479
5480         pipe_config->pipe_bpp = bpp;
5481
5482         /* Clamp display bpp to connector max bpp */
5483         for_each_new_connector_in_state(state, connector, connector_state, i) {
5484                 int ret;
5485
5486                 if (connector_state->crtc != &crtc->base)
5487                         continue;
5488
5489                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
5490                 if (ret)
5491                         return ret;
5492         }
5493
5494         return 0;
5495 }
5496
/* Dump the raw crtc_* timing fields of a mode to the kms debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
                                    const struct drm_display_mode *mode)
{
        drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
                    "type: 0x%x flags: 0x%x\n",
                    mode->crtc_clock,
                    mode->crtc_hdisplay, mode->crtc_hsync_start,
                    mode->crtc_hsync_end, mode->crtc_htotal,
                    mode->crtc_vdisplay, mode->crtc_vsync_start,
                    mode->crtc_vsync_end, mode->crtc_vtotal,
                    mode->type, mode->flags);
}
5509
/*
 * Dump one link M/N configuration (e.g. "fdi", "dp m_n", "dp m2_n2")
 * to the kms debug log.
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
                      const char *id, unsigned int lane_count,
                      const struct intel_link_m_n *m_n)
{
        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

        drm_dbg_kms(&i915->drm,
                    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                    id, lane_count,
                    m_n->gmch_m, m_n->gmch_n,
                    m_n->link_m, m_n->link_n, m_n->tu);
}
5523
5524 static void
5525 intel_dump_infoframe(struct drm_i915_private *dev_priv,
5526                      const union hdmi_infoframe *frame)
5527 {
5528         if (!drm_debug_enabled(DRM_UT_KMS))
5529                 return;
5530
5531         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
5532 }
5533
5534 static void
5535 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
5536                       const struct drm_dp_vsc_sdp *vsc)
5537 {
5538         if (!drm_debug_enabled(DRM_UT_KMS))
5539                 return;
5540
5541         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
5542 }
5543
/* Map INTEL_OUTPUT_* enum values to their names for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
        OUTPUT_TYPE(UNUSED),
        OUTPUT_TYPE(ANALOG),
        OUTPUT_TYPE(DVO),
        OUTPUT_TYPE(SDVO),
        OUTPUT_TYPE(LVDS),
        OUTPUT_TYPE(TVOUT),
        OUTPUT_TYPE(HDMI),
        OUTPUT_TYPE(DP),
        OUTPUT_TYPE(EDP),
        OUTPUT_TYPE(DSI),
        OUTPUT_TYPE(DDI),
        OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
5562
5563 static void snprintf_output_types(char *buf, size_t len,
5564                                   unsigned int output_types)
5565 {
5566         char *str = buf;
5567         int i;
5568
5569         str[0] = '\0';
5570
5571         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
5572                 int r;
5573
5574                 if ((output_types & BIT(i)) == 0)
5575                         continue;
5576
5577                 r = snprintf(str, len, "%s%s",
5578                              str != buf ? "," : "", output_type_str[i]);
5579                 if (r >= len)
5580                         break;
5581                 str += r;
5582                 len -= r;
5583
5584                 output_types &= ~BIT(i);
5585         }
5586
5587         WARN_ON_ONCE(output_types != 0);
5588 }
5589
5590 static const char * const output_format_str[] = {
5591         [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
5592         [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
5593         [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
5594 };
5595
5596 static const char *output_formats(enum intel_output_format format)
5597 {
5598         if (format >= ARRAY_SIZE(output_format_str))
5599                 return "invalid";
5600         return output_format_str[format];
5601 }
5602
5603 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
5604 {
5605         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
5606         struct drm_i915_private *i915 = to_i915(plane->base.dev);
5607         const struct drm_framebuffer *fb = plane_state->hw.fb;
5608
5609         if (!fb) {
5610                 drm_dbg_kms(&i915->drm,
5611                             "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
5612                             plane->base.base.id, plane->base.name,
5613                             yesno(plane_state->uapi.visible));
5614                 return;
5615         }
5616
5617         drm_dbg_kms(&i915->drm,
5618                     "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
5619                     plane->base.base.id, plane->base.name,
5620                     fb->base.id, fb->width, fb->height, &fb->format->format,
5621                     fb->modifier, yesno(plane_state->uapi.visible));
5622         drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
5623                     plane_state->hw.rotation, plane_state->scaler_id);
5624         if (plane_state->uapi.visible)
5625                 drm_dbg_kms(&i915->drm,
5626                             "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
5627                             DRM_RECT_FP_ARG(&plane_state->uapi.src),
5628                             DRM_RECT_ARG(&plane_state->uapi.dst));
5629 }
5630
/*
 * Dump an entire crtc state to the kms debug log: output types, link m/n,
 * infoframes, timings, pfit/scaler and color management state, plus all
 * planes on the pipe when an atomic state is supplied.  'context' is a
 * free-form tag identifying where the dump was triggered from.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
                                   struct intel_atomic_state *state,
                                   const char *context)
{
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_plane_state *plane_state;
        struct intel_plane *plane;
        char buf[64];
        int i;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
                    crtc->base.base.id, crtc->base.name,
                    yesno(pipe_config->hw.enable), context);

        /* Only the plane states are meaningful for a disabled crtc. */
        if (!pipe_config->hw.enable)
                goto dump_planes;

        snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
        drm_dbg_kms(&dev_priv->drm,
                    "active: %s, output_types: %s (0x%x), output format: %s\n",
                    yesno(pipe_config->hw.active),
                    buf, pipe_config->output_types,
                    output_formats(pipe_config->output_format));

        drm_dbg_kms(&dev_priv->drm,
                    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
                    transcoder_name(pipe_config->cpu_transcoder),
                    pipe_config->pipe_bpp, pipe_config->dither);

        drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
                    transcoder_name(pipe_config->mst_master_transcoder));

        drm_dbg_kms(&dev_priv->drm,
                    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
                    transcoder_name(pipe_config->master_transcoder),
                    pipe_config->sync_mode_slaves_mask);

        drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
                    pipe_config->bigjoiner_slave ? "slave" :
                    pipe_config->bigjoiner ? "master" : "no");

        drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
                    enableddisabled(pipe_config->splitter.enable),
                    pipe_config->splitter.link_count,
                    pipe_config->splitter.pixel_overlap);

        if (pipe_config->has_pch_encoder)
                intel_dump_m_n_config(pipe_config, "fdi",
                                      pipe_config->fdi_lanes,
                                      &pipe_config->fdi_m_n);

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                intel_dump_m_n_config(pipe_config, "dp m_n",
                                pipe_config->lane_count, &pipe_config->dp_m_n);
                if (pipe_config->has_drrs)
                        intel_dump_m_n_config(pipe_config, "dp m2_n2",
                                              pipe_config->lane_count,
                                              &pipe_config->dp_m2_n2);
        }

        drm_dbg_kms(&dev_priv->drm,
                    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
                    pipe_config->has_audio, pipe_config->has_infoframe,
                    pipe_config->infoframes.enable);

        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
                drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
                            pipe_config->infoframes.gcp);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
        /*
         * NOTE(review): GAMUT_METADATA also dumps infoframes.drm —
         * presumably because the DRM infoframe is carried in the GMP
         * packet; confirm this is intentional and not a copy/paste slip.
         */
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
                intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
        if (pipe_config->infoframes.enable &
            intel_hdmi_infoframe_enable(DP_SDP_VSC))
                intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

        drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
                    yesno(pipe_config->vrr.enable),
                    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
                    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
                    pipe_config->vrr.flipline,
                    intel_vrr_vmin_vblank_start(pipe_config),
                    intel_vrr_vmax_vblank_start(pipe_config));

        drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.mode);
        drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
        intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
        drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
        intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
        drm_dbg_kms(&dev_priv->drm,
                    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
                    pipe_config->port_clock,
                    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
                    pipe_config->pixel_rate);

        drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
                    pipe_config->linetime, pipe_config->ips_linetime);

        if (DISPLAY_VER(dev_priv) >= 9)
                drm_dbg_kms(&dev_priv->drm,
                            "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
                            crtc->num_scalers,
                            pipe_config->scaler_state.scaler_users,
                            pipe_config->scaler_state.scaler_id);

        if (HAS_GMCH(dev_priv))
                drm_dbg_kms(&dev_priv->drm,
                            "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                            pipe_config->gmch_pfit.control,
                            pipe_config->gmch_pfit.pgm_ratios,
                            pipe_config->gmch_pfit.lvds_border_bits);
        else
                drm_dbg_kms(&dev_priv->drm,
                            "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
                            DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
                            enableddisabled(pipe_config->pch_pfit.enabled),
                            yesno(pipe_config->pch_pfit.force_thru));

        drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
                    pipe_config->ips_enabled, pipe_config->double_wide);

        intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

        if (IS_CHERRYVIEW(dev_priv))
                drm_dbg_kms(&dev_priv->drm,
                            "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
                            pipe_config->cgm_mode, pipe_config->gamma_mode,
                            pipe_config->gamma_enable, pipe_config->csc_enable);
        else
                drm_dbg_kms(&dev_priv->drm,
                            "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
                            pipe_config->csc_mode, pipe_config->gamma_mode,
                            pipe_config->gamma_enable, pipe_config->csc_enable);

        drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
                    pipe_config->hw.degamma_lut ?
                    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
                    pipe_config->hw.gamma_lut ?
                    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
        if (!state)
                return;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe == crtc->pipe)
                        intel_dump_plane_state(plane_state);
        }
}
5796
5797 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
5798 {
5799         struct drm_device *dev = state->base.dev;
5800         struct drm_connector *connector;
5801         struct drm_connector_list_iter conn_iter;
5802         unsigned int used_ports = 0;
5803         unsigned int used_mst_ports = 0;
5804         bool ret = true;
5805
5806         /*
5807          * We're going to peek into connector->state,
5808          * hence connection_mutex must be held.
5809          */
5810         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5811
5812         /*
5813          * Walk the connector list instead of the encoder
5814          * list to detect the problem on ddi platforms
5815          * where there's just one encoder per digital port.
5816          */
5817         drm_connector_list_iter_begin(dev, &conn_iter);
5818         drm_for_each_connector_iter(connector, &conn_iter) {
5819                 struct drm_connector_state *connector_state;
5820                 struct intel_encoder *encoder;
5821
5822                 connector_state =
5823                         drm_atomic_get_new_connector_state(&state->base,
5824                                                            connector);
5825                 if (!connector_state)
5826                         connector_state = connector->state;
5827
5828                 if (!connector_state->best_encoder)
5829                         continue;
5830
5831                 encoder = to_intel_encoder(connector_state->best_encoder);
5832
5833                 drm_WARN_ON(dev, !connector_state->crtc);
5834
5835                 switch (encoder->type) {
5836                 case INTEL_OUTPUT_DDI:
5837                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5838                                 break;
5839                         fallthrough;
5840                 case INTEL_OUTPUT_DP:
5841                 case INTEL_OUTPUT_HDMI:
5842                 case INTEL_OUTPUT_EDP:
5843                         /* the same port mustn't appear more than once */
5844                         if (used_ports & BIT(encoder->port))
5845                                 ret = false;
5846
5847                         used_ports |= BIT(encoder->port);
5848                         break;
5849                 case INTEL_OUTPUT_DP_MST:
5850                         used_mst_ports |=
5851                                 1 << encoder->port;
5852                         break;
5853                 default:
5854                         break;
5855                 }
5856         }
5857         drm_connector_list_iter_end(&conn_iter);
5858
5859         /* can't mix MST and SST/HDMI on the same port */
5860         if (used_ports & used_mst_ports)
5861                 return false;
5862
5863         return ret;
5864 }
5865
/*
 * For the fastset (no-modeset) path: refresh the hw color blobs from
 * the (bigjoiner) master crtc's new state, if that state is part of
 * this update.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);

	/* No need to copy state if the master state is unchanged */
	if (!master_crtc_state)
		return;

	intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
}
5880
/*
 * Seed the hw crtc state from the uapi (userspace visible) state at the
 * start of a modeset; color blobs are handled by the nomodeset helper.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
                                 struct intel_crtc_state *crtc_state)
{
        crtc_state->hw.enable = crtc_state->uapi.enable;
        crtc_state->hw.active = crtc_state->uapi.active;
        crtc_state->hw.mode = crtc_state->uapi.mode;
        crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
        crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

        intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
5893
/*
 * Propagate the hw crtc state back into the uapi state so userspace
 * sees what was actually programmed.  Bigjoiner slaves are skipped:
 * their uapi state mirrors the master's.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
        if (crtc_state->bigjoiner_slave)
                return;

        crtc_state->uapi.enable = crtc_state->hw.enable;
        crtc_state->uapi.active = crtc_state->hw.active;
        /*
         * The mode-blob duplication inside can only fail on OOM; the
         * WARN still evaluates (and thus performs) the call itself.
         */
        drm_WARN_ON(crtc_state->uapi.crtc->dev,
                    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

        crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
        crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

        /* copy color blobs to uapi */
        drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
                                  crtc_state->hw.degamma_lut);
        drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
                                  crtc_state->hw.gamma_lut);
        drm_property_replace_blob(&crtc_state->uapi.ctm,
                                  crtc_state->hw.ctm);
}
5915
/*
 * Initialize a bigjoiner slave's crtc_state as a copy of the master's
 * (from_crtc_state), preserving the slave's own uapi state and per-crtc
 * resources (scalers, dpll, crc).  Returns 0 or -ENOMEM.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
                          const struct intel_crtc_state *from_crtc_state)
{
        struct intel_crtc_state *saved_state;

        saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
        if (!saved_state)
                return -ENOMEM;

        /* Preserve the slave's own per-crtc bits in the temporary copy. */
        saved_state->uapi = crtc_state->uapi;
        saved_state->scaler_state = crtc_state->scaler_state;
        saved_state->shared_dpll = crtc_state->shared_dpll;
        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
        saved_state->crc_enabled = crtc_state->crc_enabled;

        /* Drop the slave's old hw blob references before the overwrite. */
        intel_crtc_free_hw_state(crtc_state);
        memcpy(crtc_state, saved_state, sizeof(*crtc_state));
        kfree(saved_state);

        /* Re-init hw state */
        memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
        crtc_state->hw.enable = from_crtc_state->hw.enable;
        crtc_state->hw.active = from_crtc_state->hw.active;
        crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
        crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

        /* Some fixups */
        crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
        crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
        crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
        crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
        crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
        crtc_state->bigjoiner_slave = true;
        crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
        crtc_state->has_audio = from_crtc_state->has_audio;

        return 0;
}
5955
/*
 * Reset crtc_state to a freshly-allocated baseline before computing a
 * full modeset, carrying over only the fields listed below, then re-seed
 * the hw state from uapi.  Returns 0 or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
                                 struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *saved_state;

        saved_state = intel_crtc_state_alloc(crtc);
        if (!saved_state)
                return -ENOMEM;

        /* free the old crtc_state->hw members */
        intel_crtc_free_hw_state(crtc_state);

        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
         * fixed, so that the crtc_state can be safely duplicated. For now,
         * only fields that are know to not cause problems are preserved. */

        saved_state->uapi = crtc_state->uapi;
        saved_state->scaler_state = crtc_state->scaler_state;
        saved_state->shared_dpll = crtc_state->shared_dpll;
        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
        memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
               sizeof(saved_state->icl_port_dplls));
        saved_state->crc_enabled = crtc_state->crc_enabled;
        /* On gmch platforms the wm state survives across the modeset. */
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                saved_state->wm = crtc_state->wm;

        memcpy(crtc_state, saved_state, sizeof(*crtc_state));
        kfree(saved_state);

        intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

        return 0;
}
5994
/*
 * Compute the full hw state for @pipe_config for a modeset.
 *
 * Seeds the state with defaults, gives every encoder feeding this crtc a
 * chance to adjust the config via its ->compute_config() hook, and then
 * runs the crtc-level computation. Returns 0 on success, -EDEADLK to
 * trigger an atomic lock retry, or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
                          struct intel_crtc_state *pipe_config)
{
        struct drm_crtc *crtc = pipe_config->uapi.crtc;
        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int base_bpp, ret, i;
        bool retry = true;

        /* Default to the transcoder matching the pipe; encoders may override. */
        pipe_config->cpu_transcoder =
                (enum transcoder) to_intel_crtc(crtc)->pipe;

        /*
         * Sanitize sync polarity flags based on requested ones. If neither
         * positive or negative polarity is requested, treat this as meaning
         * negative polarity.
         */
        if (!(pipe_config->hw.adjusted_mode.flags &
              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

        if (!(pipe_config->hw.adjusted_mode.flags &
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

        ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
                                        pipe_config);
        if (ret)
                return ret;

        /* Remember the starting bpp purely for the debug message below. */
        base_bpp = pipe_config->pipe_bpp;

        /*
         * Determine the real pipe dimensions. Note that stereo modes can
         * increase the actual pipe size due to the frame doubling and
         * insertion of additional space for blanks between the frame. This
         * is stored in the crtc timings. We use the requested mode to do this
         * computation to clearly distinguish it from the adjusted mode, which
         * can be changed by the connectors in the below retry loop.
         */
        drm_mode_get_hv_timing(&pipe_config->hw.mode,
                               &pipe_config->pipe_src_w,
                               &pipe_config->pipe_src_h);

        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
                struct intel_encoder *encoder =
                        to_intel_encoder(connector_state->best_encoder);

                /* Only look at connectors driven by this crtc. */
                if (connector_state->crtc != crtc)
                        continue;

                if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
                        drm_dbg_kms(&i915->drm,
                                    "rejecting invalid cloning configuration\n");
                        return -EINVAL;
                }

                /*
                 * Determine output_types before calling the .compute_config()
                 * hooks so that the hooks can use this information safely.
                 */
                if (encoder->compute_output_type)
                        pipe_config->output_types |=
                                BIT(encoder->compute_output_type(encoder, pipe_config,
                                                                 connector_state));
                else
                        pipe_config->output_types |= BIT(encoder->type);
        }

encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
        pipe_config->pixel_multiplier = 1;

        /* Fill in default crtc timings, allow encoders to overwrite them. */
        drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
                              CRTC_STEREO_DOUBLE);

        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
         */
        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
                struct intel_encoder *encoder =
                        to_intel_encoder(connector_state->best_encoder);

                if (connector_state->crtc != crtc)
                        continue;

                ret = encoder->compute_config(encoder, pipe_config,
                                              connector_state);
                /* -EDEADLK is passed straight up for the atomic retry dance. */
                if (ret == -EDEADLK)
                        return ret;
                if (ret < 0) {
                        drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
                        return ret;
                }
        }

        /* Set default port clock if not overwritten by the encoder. Needs to be
         * done afterwards in case the encoder adjusts the mode. */
        if (!pipe_config->port_clock)
                pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
                        * pipe_config->pixel_multiplier;

        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
        if (ret == -EDEADLK)
                return ret;
        if (ret == -EAGAIN) {
                /* At most one retry; a second -EAGAIN means we are looping. */
                if (drm_WARN(&i915->drm, !retry,
                             "loop in pipe configuration computation\n"))
                        return -EINVAL;

                drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
                retry = false;
                goto encoder_retry;
        }
        if (ret < 0) {
                drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
                return ret;
        }

        /* Dithering seems to not pass-through bits correctly when it should, so
         * only enable it on 6bpc panels and when its not a compliance
         * test requesting 6bpc video pattern.
         */
        pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
                !pipe_config->dither_force_disable;
        drm_dbg_kms(&i915->drm,
                    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

        return 0;
}
6131
6132 static int
6133 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
6134 {
6135         struct intel_atomic_state *state =
6136                 to_intel_atomic_state(crtc_state->uapi.state);
6137         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6138         struct drm_connector_state *conn_state;
6139         struct drm_connector *connector;
6140         int i;
6141
6142         for_each_new_connector_in_state(&state->base, connector,
6143                                         conn_state, i) {
6144                 struct intel_encoder *encoder =
6145                         to_intel_encoder(conn_state->best_encoder);
6146                 int ret;
6147
6148                 if (conn_state->crtc != &crtc->base ||
6149                     !encoder->compute_config_late)
6150                         continue;
6151
6152                 ret = encoder->compute_config_late(encoder, crtc_state,
6153                                                    conn_state);
6154                 if (ret)
6155                         return ret;
6156         }
6157
6158         return 0;
6159 }
6160
/*
 * Compare two clocks (in kHz) with ~5% tolerance.
 *
 * Returns true when the clocks are identical, or when both are non-zero
 * and their difference relative to their sum stays under 5%.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int sum, delta;

        /* Identical clocks trivially match. */
        if (clock1 == clock2)
                return true;

        /* A zero clock never fuzzily matches a non-zero one. */
        if (clock1 == 0 || clock2 == 0)
                return false;

        delta = abs(clock1 - clock2);
        sum = clock1 + clock2;

        /*
         * Accept when (delta + sum) * 100 / sum < 105, i.e. the
         * difference is below ~5% of the combined clocks.
         */
        return (delta + sum) * 100 / sum < 105;
}
6178
6179 static bool
6180 intel_compare_m_n(unsigned int m, unsigned int n,
6181                   unsigned int m2, unsigned int n2,
6182                   bool exact)
6183 {
6184         if (m == m2 && n == n2)
6185                 return true;
6186
6187         if (exact || !m || !n || !m2 || !n2)
6188                 return false;
6189
6190         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
6191
6192         if (n > n2) {
6193                 while (n > n2) {
6194                         m2 <<= 1;
6195                         n2 <<= 1;
6196                 }
6197         } else if (n < n2) {
6198                 while (n < n2) {
6199                         m <<= 1;
6200                         n <<= 1;
6201                 }
6202         }
6203
6204         if (n != n2)
6205                 return false;
6206
6207         return intel_fuzzy_clock_check(m, m2);
6208 }
6209
6210 static bool
6211 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
6212                        const struct intel_link_m_n *m2_n2,
6213                        bool exact)
6214 {
6215         return m_n->tu == m2_n2->tu &&
6216                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
6217                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
6218                 intel_compare_m_n(m_n->link_m, m_n->link_n,
6219                                   m2_n2->link_m, m2_n2->link_n, exact);
6220 }
6221
6222 static bool
6223 intel_compare_infoframe(const union hdmi_infoframe *a,
6224                         const union hdmi_infoframe *b)
6225 {
6226         return memcmp(a, b, sizeof(*a)) == 0;
6227 }
6228
6229 static bool
6230 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
6231                          const struct drm_dp_vsc_sdp *b)
6232 {
6233         return memcmp(a, b, sizeof(*a)) == 0;
6234 }
6235
6236 static void
6237 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
6238                                bool fastset, const char *name,
6239                                const union hdmi_infoframe *a,
6240                                const union hdmi_infoframe *b)
6241 {
6242         if (fastset) {
6243                 if (!drm_debug_enabled(DRM_UT_KMS))
6244                         return;
6245
6246                 drm_dbg_kms(&dev_priv->drm,
6247                             "fastset mismatch in %s infoframe\n", name);
6248                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6249                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
6250                 drm_dbg_kms(&dev_priv->drm, "found:\n");
6251                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
6252         } else {
6253                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
6254                 drm_err(&dev_priv->drm, "expected:\n");
6255                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
6256                 drm_err(&dev_priv->drm, "found:\n");
6257                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
6258         }
6259 }
6260
6261 static void
6262 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
6263                                 bool fastset, const char *name,
6264                                 const struct drm_dp_vsc_sdp *a,
6265                                 const struct drm_dp_vsc_sdp *b)
6266 {
6267         if (fastset) {
6268                 if (!drm_debug_enabled(DRM_UT_KMS))
6269                         return;
6270
6271                 drm_dbg_kms(&dev_priv->drm,
6272                             "fastset mismatch in %s dp sdp\n", name);
6273                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
6274                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
6275                 drm_dbg_kms(&dev_priv->drm, "found:\n");
6276                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
6277         } else {
6278                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
6279                 drm_err(&dev_priv->drm, "expected:\n");
6280                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
6281                 drm_err(&dev_priv->drm, "found:\n");
6282                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
6283         }
6284 }
6285
6286 static void __printf(4, 5)
6287 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
6288                      const char *name, const char *format, ...)
6289 {
6290         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6291         struct va_format vaf;
6292         va_list args;
6293
6294         va_start(args, format);
6295         vaf.fmt = format;
6296         vaf.va = &args;
6297
6298         if (fastset)
6299                 drm_dbg_kms(&i915->drm,
6300                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
6301                             crtc->base.base.id, crtc->base.name, name, &vaf);
6302         else
6303                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
6304                         crtc->base.base.id, crtc->base.name, name, &vaf);
6305
6306         va_end(args);
6307 }
6308
6309 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
6310 {
6311         if (dev_priv->params.fastboot != -1)
6312                 return dev_priv->params.fastboot;
6313
6314         /* Enable fastboot by default on Skylake and newer */
6315         if (DISPLAY_VER(dev_priv) >= 9)
6316                 return true;
6317
6318         /* Enable fastboot by default on VLV and CHV */
6319         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6320                 return true;
6321
6322         /* Disabled by default on all others */
6323         return false;
6324 }
6325
6326 static bool
6327 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
6328                           const struct intel_crtc_state *pipe_config,
6329                           bool fastset)
6330 {
6331         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
6332         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
6333         bool ret = true;
6334         u32 bp_gamma = 0;
6335         bool fixup_inherited = fastset &&
6336                 current_config->inherited && !pipe_config->inherited;
6337
6338         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
6339                 drm_dbg_kms(&dev_priv->drm,
6340                             "initial modeset and fastboot not set\n");
6341                 ret = false;
6342         }
6343
6344 #define PIPE_CONF_CHECK_X(name) do { \
6345         if (current_config->name != pipe_config->name) { \
6346                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6347                                      "(expected 0x%08x, found 0x%08x)", \
6348                                      current_config->name, \
6349                                      pipe_config->name); \
6350                 ret = false; \
6351         } \
6352 } while (0)
6353
6354 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
6355         if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
6356                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6357                                      "(expected 0x%08x, found 0x%08x)", \
6358                                      current_config->name & (mask), \
6359                                      pipe_config->name & (mask)); \
6360                 ret = false; \
6361         } \
6362 } while (0)
6363
6364 #define PIPE_CONF_CHECK_I(name) do { \
6365         if (current_config->name != pipe_config->name) { \
6366                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6367                                      "(expected %i, found %i)", \
6368                                      current_config->name, \
6369                                      pipe_config->name); \
6370                 ret = false; \
6371         } \
6372 } while (0)
6373
6374 #define PIPE_CONF_CHECK_BOOL(name) do { \
6375         if (current_config->name != pipe_config->name) { \
6376                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
6377                                      "(expected %s, found %s)", \
6378                                      yesno(current_config->name), \
6379                                      yesno(pipe_config->name)); \
6380                 ret = false; \
6381         } \
6382 } while (0)
6383
6384 /*
6385  * Checks state where we only read out the enabling, but not the entire
6386  * state itself (like full infoframes or ELD for audio). These states
6387  * require a full modeset on bootup to fix up.
6388  */
6389 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6390         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6391                 PIPE_CONF_CHECK_BOOL(name); \
6392         } else { \
6393                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6394                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6395                                      yesno(current_config->name), \
6396                                      yesno(pipe_config->name)); \
6397                 ret = false; \
6398         } \
6399 } while (0)
6400
6401 #define PIPE_CONF_CHECK_P(name) do { \
6402         if (current_config->name != pipe_config->name) { \
6403                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6404                                      "(expected %p, found %p)", \
6405                                      current_config->name, \
6406                                      pipe_config->name); \
6407                 ret = false; \
6408         } \
6409 } while (0)
6410
6411 #define PIPE_CONF_CHECK_M_N(name) do { \
6412         if (!intel_compare_link_m_n(&current_config->name, \
6413                                     &pipe_config->name,\
6414                                     !fastset)) { \
6415                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6416                                      "(expected tu %i gmch %i/%i link %i/%i, " \
6417                                      "found tu %i, gmch %i/%i link %i/%i)", \
6418                                      current_config->name.tu, \
6419                                      current_config->name.gmch_m, \
6420                                      current_config->name.gmch_n, \
6421                                      current_config->name.link_m, \
6422                                      current_config->name.link_n, \
6423                                      pipe_config->name.tu, \
6424                                      pipe_config->name.gmch_m, \
6425                                      pipe_config->name.gmch_n, \
6426                                      pipe_config->name.link_m, \
6427                                      pipe_config->name.link_n); \
6428                 ret = false; \
6429         } \
6430 } while (0)
6431
6432 /* This is required for BDW+ where there is only one set of registers for
6433  * switching between high and low RR.
6434  * This macro can be used whenever a comparison has to be made between one
6435  * hw state and multiple sw state variables.
6436  */
6437 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6438         if (!intel_compare_link_m_n(&current_config->name, \
6439                                     &pipe_config->name, !fastset) && \
6440             !intel_compare_link_m_n(&current_config->alt_name, \
6441                                     &pipe_config->name, !fastset)) { \
6442                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6443                                      "(expected tu %i gmch %i/%i link %i/%i, " \
6444                                      "or tu %i gmch %i/%i link %i/%i, " \
6445                                      "found tu %i, gmch %i/%i link %i/%i)", \
6446                                      current_config->name.tu, \
6447                                      current_config->name.gmch_m, \
6448                                      current_config->name.gmch_n, \
6449                                      current_config->name.link_m, \
6450                                      current_config->name.link_n, \
6451                                      current_config->alt_name.tu, \
6452                                      current_config->alt_name.gmch_m, \
6453                                      current_config->alt_name.gmch_n, \
6454                                      current_config->alt_name.link_m, \
6455                                      current_config->alt_name.link_n, \
6456                                      pipe_config->name.tu, \
6457                                      pipe_config->name.gmch_m, \
6458                                      pipe_config->name.gmch_n, \
6459                                      pipe_config->name.link_m, \
6460                                      pipe_config->name.link_n); \
6461                 ret = false; \
6462         } \
6463 } while (0)
6464
6465 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6466         if ((current_config->name ^ pipe_config->name) & (mask)) { \
6467                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6468                                      "(%x) (expected %i, found %i)", \
6469                                      (mask), \
6470                                      current_config->name & (mask), \
6471                                      pipe_config->name & (mask)); \
6472                 ret = false; \
6473         } \
6474 } while (0)
6475
6476 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6477         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6478                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
6479                                      "(expected %i, found %i)", \
6480                                      current_config->name, \
6481                                      pipe_config->name); \
6482                 ret = false; \
6483         } \
6484 } while (0)
6485
6486 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6487         if (!intel_compare_infoframe(&current_config->infoframes.name, \
6488                                      &pipe_config->infoframes.name)) { \
6489                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6490                                                &current_config->infoframes.name, \
6491                                                &pipe_config->infoframes.name); \
6492                 ret = false; \
6493         } \
6494 } while (0)
6495
6496 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
6497         if (!current_config->has_psr && !pipe_config->has_psr && \
6498             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
6499                                       &pipe_config->infoframes.name)) { \
6500                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
6501                                                 &current_config->infoframes.name, \
6502                                                 &pipe_config->infoframes.name); \
6503                 ret = false; \
6504         } \
6505 } while (0)
6506
6507 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
6508         if (current_config->name1 != pipe_config->name1) { \
6509                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
6510                                 "(expected %i, found %i, won't compare lut values)", \
6511                                 current_config->name1, \
6512                                 pipe_config->name1); \
6513                 ret = false;\
6514         } else { \
6515                 if (!intel_color_lut_equal(current_config->name2, \
6516                                         pipe_config->name2, pipe_config->name1, \
6517                                         bit_precision)) { \
6518                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
6519                                         "hw_state doesn't match sw_state"); \
6520                         ret = false; \
6521                 } \
6522         } \
6523 } while (0)
6524
6525 #define PIPE_CONF_QUIRK(quirk) \
6526         ((current_config->quirks | pipe_config->quirks) & (quirk))
6527
6528         PIPE_CONF_CHECK_I(cpu_transcoder);
6529
6530         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6531         PIPE_CONF_CHECK_I(fdi_lanes);
6532         PIPE_CONF_CHECK_M_N(fdi_m_n);
6533
6534         PIPE_CONF_CHECK_I(lane_count);
6535         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
6536
6537         if (DISPLAY_VER(dev_priv) < 8) {
6538                 PIPE_CONF_CHECK_M_N(dp_m_n);
6539
6540                 if (current_config->has_drrs)
6541                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
6542         } else
6543                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
6544
6545         PIPE_CONF_CHECK_X(output_types);
6546
6547         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
6548         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
6549         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
6550         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
6551         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
6552         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
6553
6554         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
6555         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
6556         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
6557         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
6558         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
6559         PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
6560
6561         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
6562         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
6563         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
6564         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
6565         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
6566         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
6567
6568         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
6569         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
6570         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
6571         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
6572         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
6573         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
6574
6575         PIPE_CONF_CHECK_I(pixel_multiplier);
6576
6577         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6578                               DRM_MODE_FLAG_INTERLACE);
6579
6580         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6581                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6582                                       DRM_MODE_FLAG_PHSYNC);
6583                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6584                                       DRM_MODE_FLAG_NHSYNC);
6585                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6586                                       DRM_MODE_FLAG_PVSYNC);
6587                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6588                                       DRM_MODE_FLAG_NVSYNC);
6589         }
6590
6591         PIPE_CONF_CHECK_I(output_format);
6592         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
6593         if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
6594             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6595                 PIPE_CONF_CHECK_BOOL(limited_color_range);
6596
6597         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
6598         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
6599         PIPE_CONF_CHECK_BOOL(has_infoframe);
6600         PIPE_CONF_CHECK_BOOL(fec_enable);
6601
6602         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
6603
6604         PIPE_CONF_CHECK_X(gmch_pfit.control);
6605         /* pfit ratios are autocomputed by the hw on gen4+ */
6606         if (DISPLAY_VER(dev_priv) < 4)
6607                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
6608         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
6609
6610         /*
6611          * Changing the EDP transcoder input mux
6612          * (A_ONOFF vs. A_ON) requires a full modeset.
6613          */
6614         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
6615
6616         if (!fastset) {
6617                 PIPE_CONF_CHECK_I(pipe_src_w);
6618                 PIPE_CONF_CHECK_I(pipe_src_h);
6619
6620                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
6621                 if (current_config->pch_pfit.enabled) {
6622                         PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
6623                         PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
6624                         PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
6625                         PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
6626                 }
6627
6628                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
6629                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
6630
6631                 PIPE_CONF_CHECK_X(gamma_mode);
6632                 if (IS_CHERRYVIEW(dev_priv))
6633                         PIPE_CONF_CHECK_X(cgm_mode);
6634                 else
6635                         PIPE_CONF_CHECK_X(csc_mode);
6636                 PIPE_CONF_CHECK_BOOL(gamma_enable);
6637                 PIPE_CONF_CHECK_BOOL(csc_enable);
6638
6639                 PIPE_CONF_CHECK_I(linetime);
6640                 PIPE_CONF_CHECK_I(ips_linetime);
6641
6642                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
6643                 if (bp_gamma)
6644                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
6645
6646                 if (current_config->active_planes) {
6647                         PIPE_CONF_CHECK_BOOL(has_psr);
6648                         PIPE_CONF_CHECK_BOOL(has_psr2);
6649                         PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
6650                         PIPE_CONF_CHECK_I(dc3co_exitline);
6651                 }
6652         }
6653
6654         PIPE_CONF_CHECK_BOOL(double_wide);
6655
6656         if (dev_priv->dpll.mgr) {
6657                 PIPE_CONF_CHECK_P(shared_dpll);
6658
6659                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
6660                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
6661                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
6662                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
6663                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6664                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
6665                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
6666                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
6667                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
6668                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
6669                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
6670                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
6671                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
6672                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
6673                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
6674                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
6675                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
6676                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
6677                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
6678                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
6679                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
6680                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
6681                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
6682                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
6683                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
6684                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
6685                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
6686                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
6687                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
6688                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
6689                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
6690         }
6691
6692         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
6693         PIPE_CONF_CHECK_X(dsi_pll.div);
6694
6695         if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
6696                 PIPE_CONF_CHECK_I(pipe_bpp);
6697
6698         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
6699         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
6700         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
6701
6702         PIPE_CONF_CHECK_I(min_voltage_level);
6703
6704         if (current_config->has_psr || pipe_config->has_psr)
6705                 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
6706                                             ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
6707         else
6708                 PIPE_CONF_CHECK_X(infoframes.enable);
6709
6710         PIPE_CONF_CHECK_X(infoframes.gcp);
6711         PIPE_CONF_CHECK_INFOFRAME(avi);
6712         PIPE_CONF_CHECK_INFOFRAME(spd);
6713         PIPE_CONF_CHECK_INFOFRAME(hdmi);
6714         PIPE_CONF_CHECK_INFOFRAME(drm);
6715         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
6716
6717         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
6718         PIPE_CONF_CHECK_I(master_transcoder);
6719         PIPE_CONF_CHECK_BOOL(bigjoiner);
6720         PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
6721         PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
6722
6723         PIPE_CONF_CHECK_I(dsc.compression_enable);
6724         PIPE_CONF_CHECK_I(dsc.dsc_split);
6725         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
6726
6727         PIPE_CONF_CHECK_BOOL(splitter.enable);
6728         PIPE_CONF_CHECK_I(splitter.link_count);
6729         PIPE_CONF_CHECK_I(splitter.pixel_overlap);
6730
6731         PIPE_CONF_CHECK_I(mst_master_transcoder);
6732
6733         PIPE_CONF_CHECK_BOOL(vrr.enable);
6734         PIPE_CONF_CHECK_I(vrr.vmin);
6735         PIPE_CONF_CHECK_I(vrr.vmax);
6736         PIPE_CONF_CHECK_I(vrr.flipline);
6737         PIPE_CONF_CHECK_I(vrr.pipeline_full);
6738         PIPE_CONF_CHECK_I(vrr.guardband);
6739
6740 #undef PIPE_CONF_CHECK_X
6741 #undef PIPE_CONF_CHECK_I
6742 #undef PIPE_CONF_CHECK_BOOL
6743 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
6744 #undef PIPE_CONF_CHECK_P
6745 #undef PIPE_CONF_CHECK_FLAGS
6746 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
6747 #undef PIPE_CONF_CHECK_COLOR_LUT
6748 #undef PIPE_CONF_QUIRK
6749
6750         return ret;
6751 }
6752
6753 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
6754                                            const struct intel_crtc_state *pipe_config)
6755 {
6756         if (pipe_config->has_pch_encoder) {
6757                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6758                                                             &pipe_config->fdi_m_n);
6759                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
6760
6761                 /*
6762                  * FDI already provided one idea for the dotclock.
6763                  * Yell if the encoder disagrees.
6764                  */
6765                 drm_WARN(&dev_priv->drm,
6766                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
6767                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6768                          fdi_dotclock, dotclock);
6769         }
6770 }
6771
/*
 * Cross-check the computed SKL+ watermark and DDB allocation state against
 * what the hardware actually has programmed, logging drm_err() on any
 * mismatch. Only meaningful on display version 9+ and for active crtcs.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Scratch space for the hardware readout; too big for the stack. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_pipe_wm wm;
        } *hw;
        const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
        int level, max_level = ilk_wm_max_level(dev_priv);
        struct intel_plane *plane;
        u8 hw_enabled_slices;

        if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return; /* best effort: silently skip verification on OOM */

        /* Read back the current watermark and DDB programming. */
        skl_pipe_wm_get_hw_state(crtc, &hw->wm);

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

        /* DBUF slice tracking only exists on display version 11+. */
        if (DISPLAY_VER(dev_priv) >= 11 &&
            hw_enabled_slices != dev_priv->dbuf.enabled_slices)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
                        dev_priv->dbuf.enabled_slices,
                        hw_enabled_slices);

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
                const struct skl_wm_level *hw_wm_level, *sw_wm_level;

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        hw_wm_level = &hw->wm.planes[plane->id].wm[level];
                        sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

                        if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
                                continue;

                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name, level,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* Transition watermark */
                hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
                sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

                if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* SAGV WM0 (only on platforms with dedicated SAGV watermarks) */
                hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
                sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

                if (HAS_HW_SAGV_WM(dev_priv) &&
                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* SAGV transition watermark */
                hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
                sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

                if (HAS_HW_SAGV_WM(dev_priv) &&
                    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                plane->base.base.id, plane->base.name,
                                sw_wm_level->enable,
                                sw_wm_level->blocks,
                                sw_wm_level->lines,
                                hw_wm_level->enable,
                                hw_wm_level->blocks,
                                hw_wm_level->lines);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane->id];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
                                plane->base.base.id, plane->base.name,
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
6891
/*
 * For every connector in @state attached to @crtc, verify that its
 * intel-level connector state is consistent and that the atomic
 * best_encoder matches the legacy connector->encoder pointer.
 *
 * @crtc may be NULL (disabled-state pass); then only connectors with no
 * crtc are checked. NOTE(review): this relies on &crtc->base evaluating
 * to NULL when crtc == NULL (i.e. base being the first member) - verify
 * against struct intel_crtc's layout.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
                       struct intel_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *new_conn_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
                struct intel_crtc_state *crtc_state = NULL;

                if (new_conn_state->crtc != &crtc->base)
                        continue;

                if (crtc)
                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                intel_connector_verify_state(crtc_state, new_conn_state);

                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
        }
}
6916
/*
 * Verify every encoder's software enable state against the connectors in
 * @state and against the hardware: an encoder referenced by a new
 * connector state must have a matching crtc, and a detached encoder must
 * actually be off in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /*
                 * found: the encoder appears in this state (old or new);
                 * enabled: some new connector state still uses it.
                 */
                bool enabled = false, found = false;
                enum pipe pipe;

                drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
                            encoder->base.base.id,
                            encoder->base.name);

                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoders untouched by this commit are not checked. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                if (!encoder->base.crtc) {
                        bool active;

                        /* A detached encoder must also be off in hardware. */
                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
6965
/*
 * Read the crtc state back from the hardware and compare it against the
 * freshly computed software state, warning on any disagreement.
 *
 * The old crtc state is no longer needed at this point, so it is reset
 * and reused as scratch space (pipe_config) for the hardware readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct intel_crtc *master_crtc;

        /* Wipe the old state but keep its backing atomic state pointer. */
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
        intel_crtc_state_reset(old_crtc_state, crtc);
        old_crtc_state->uapi.state = state;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);

        pipe_config->hw.enable = new_crtc_state->hw.enable;

        intel_crtc_get_pipe_config(pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv) && pipe_config->hw.active)
                pipe_config->hw.active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, pipe_config->hw.active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        /* With bigjoiner the encoders hang off the master crtc. */
        master_crtc = intel_master_crtc(new_crtc_state);

        for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
                enum pipe pipe;
                bool active;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && master_crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                if (active)
                        intel_encoder_get_config(encoder, pipe_config);
        }

        /* Full state comparison only makes sense for an active crtc. */
        if (!new_crtc_state->hw.active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
7036
7037 static void
7038 intel_verify_planes(struct intel_atomic_state *state)
7039 {
7040         struct intel_plane *plane;
7041         const struct intel_plane_state *plane_state;
7042         int i;
7043
7044         for_each_new_intel_plane_in_state(state, plane,
7045                                           plane_state, i)
7046                 assert_plane(plane, plane_state->planar_slave ||
7047                              plane_state->uapi.visible);
7048 }
7049
/*
 * Verify one shared DPLL's software tracking (on/active_mask/pipe_mask)
 * against its hardware state. When @crtc is NULL only the global
 * bookkeeping is checked; otherwise the given crtc's membership in the
 * pll's masks is verified according to @new_crtc_state->hw.active.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct intel_crtc *crtc,
                         struct intel_crtc_state *new_crtc_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        u8 pipe_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

        active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on plls legitimately stay enabled with no users. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active pipe\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        if (!crtc) {
                /* Global check: no pipe may be active without a reference. */
                I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
                                "more active pll users than references: 0x%x vs 0x%x\n",
                                pll->active_mask, pll->state.pipe_mask);

                return;
        }

        pipe_mask = BIT(crtc->pipe);

        if (new_crtc_state->hw.active)
                I915_STATE_WARN(!(pll->active_mask & pipe_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & pipe_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);

        I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
                        pipe_mask, pll->state.pipe_mask);

        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
7104
/*
 * Verify the shared DPLL state for @crtc: check the pll it now uses,
 * and if the crtc switched plls, make sure it no longer appears in the
 * old pll's active/enabled masks.
 */
static void
verify_shared_dpll_state(struct intel_crtc *crtc,
                         struct intel_crtc_state *old_crtc_state,
                         struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (new_crtc_state->shared_dpll)
                verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);

        /* If the crtc moved to a different pll, the old one must have let go. */
        if (old_crtc_state->shared_dpll &&
            old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
                u8 pipe_mask = BIT(crtc->pipe);
                struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

                I915_STATE_WARN(pll->active_mask & pipe_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
                                pipe_name(crtc->pipe), pll->active_mask);
                I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
                                "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
                                pipe_name(crtc->pipe), pll->state.pipe_mask);
        }
}
7128
/*
 * DG2 only: read the MPLLB registers back through the crtc's encoder and
 * compare them field by field against the computed software state.
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
                   struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_mpllb_state mpllb_hw_state = { 0 };
        struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct intel_encoder *encoder;

        if (!IS_DG2(i915))
                return;

        if (!new_crtc_state->hw.active)
                return;

        encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
        intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* Compare one sw vs. hw MPLLB field, reporting a mismatch via pipe_config_mismatch(). */
#define MPLLB_CHECK(name) do { \
        if (mpllb_sw_state->name != mpllb_hw_state.name) { \
                pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     mpllb_sw_state->name, \
                                     mpllb_hw_state.name); \
        } \
} while (0)

        MPLLB_CHECK(mpllb_cp);
        MPLLB_CHECK(mpllb_div);
        MPLLB_CHECK(mpllb_div2);
        MPLLB_CHECK(mpllb_fracn1);
        MPLLB_CHECK(mpllb_fracn2);
        MPLLB_CHECK(mpllb_sscen);
        MPLLB_CHECK(mpllb_sscstep);

        /*
         * ref_control is handled by the hardware/firmware and never
         * programmed by the software, but the proper values are supplied
         * in the bspec for verification purposes.
         */
        MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
7174
7175 static void
7176 intel_modeset_verify_crtc(struct intel_crtc *crtc,
7177                           struct intel_atomic_state *state,
7178                           struct intel_crtc_state *old_crtc_state,
7179                           struct intel_crtc_state *new_crtc_state)
7180 {
7181         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
7182                 return;
7183
7184         verify_wm_state(crtc, new_crtc_state);
7185         verify_connector_state(state, crtc);
7186         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
7187         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
7188         verify_mpllb_state(state, new_crtc_state);
7189 }
7190
7191 static void
7192 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
7193 {
7194         int i;
7195
7196         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
7197                 verify_single_dpll_state(dev_priv,
7198                                          &dev_priv->dpll.shared_dplls[i],
7199                                          NULL, NULL);
7200 }
7201
/*
 * Verify the parts of the state that are not tied to a specific crtc:
 * encoder enable state, connectors with no crtc, and the global shared
 * DPLL bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
                              struct intel_atomic_state *state)
{
        verify_encoder_state(dev_priv, state);
        verify_connector_state(state, NULL);
        verify_disabled_dpll_state(dev_priv);
}
7210
/*
 * Pull every crtc into @state and force a full modeset on all active
 * ones that aren't already doing one, including their connectors and
 * planes. Returns 0 on success or a negative error code from the
 * atomic helpers.
 */
int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        /*
         * Add all pipes to the state, and force
         * a modeset on all the active ones.
         */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state;
                int ret;

                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);

                /* Inactive pipes, and pipes already modesetting, need no forcing. */
                if (!crtc_state->hw.active ||
                    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
                        continue;

                crtc_state->uapi.mode_changed = true;

                ret = drm_atomic_add_affected_connectors(&state->base,
                                                         &crtc->base);
                if (ret)
                        return ret;

                ret = intel_atomic_add_affected_planes(state, crtc);
                if (ret)
                        return ret;

                /* Make sure all currently active planes get reprogrammed too. */
                crtc_state->update_planes |= crtc_state->active_planes;
        }

        return 0;
}
7248
/*
 * Update the crtc's vblank timestamping constants and scanline offset
 * for the newly active timings. With VRR enabled the vblank geometry is
 * derived from the VRR vmin/vmax values instead of the fixed mode.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Local copy: VRR adjustments below must not touch the crtc state. */
        struct drm_display_mode adjusted_mode =
                crtc_state->hw.adjusted_mode;

        if (crtc_state->vrr.enable) {
                adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
                crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
        }

        drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

        crtc->mode_flags = crtc_state->mode_flags;

        /*
         * The scanline counter increments at the leading edge of hsync.
         *
         * On most platforms it starts counting from vtotal-1 on the
         * first active line. That means the scanline counter value is
         * always one less than what we would expect. Ie. just after
         * start of vblank, which also occurs at start of hsync (on the
         * last active line), the scanline counter will read vblank_start-1.
         *
         * On gen2 the scanline counter starts counting from 1 instead
         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
         * to keep the value positive), instead of adding one.
         *
         * On HSW+ the behaviour of the scanline counter depends on the output
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
         *
         * On VLV/CHV DSI the scanline counter would appear to increment
         * approx. 1/3 of a scanline before start of vblank. Unfortunately
         * that means we can't tell whether we're in vblank or not while
         * we're on that particular line. We must still set scanline_offset
         * to 1 so that the vblank timestamps come out correct when we query
         * the scanline counter from within the vblank interrupt handler.
         * However if queried just before the start of vblank we'll get an
         * answer that's slightly in the future.
         */
        if (DISPLAY_VER(dev_priv) == 2) {
                int vtotal;

                vtotal = adjusted_mode.crtc_vtotal;
                if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                        vtotal /= 2;

                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else {
                crtc->scanline_offset = 1;
        }
}
7310
7311 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
7312 {
7313         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7314         struct intel_crtc_state *new_crtc_state;
7315         struct intel_crtc *crtc;
7316         int i;
7317
7318         if (!dev_priv->dpll_funcs)
7319                 return;
7320
7321         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7322                 if (!intel_crtc_needs_modeset(new_crtc_state))
7323                         continue;
7324
7325                 intel_release_shared_dplls(state, crtc);
7326         }
7327 }
7328
7329 /*
7330  * This implements the workaround described in the "notes" section of the mode
7331  * set sequence documentation. When going from no pipes or single pipe to
7332  * multiple pipes, and planes are enabled after the pipe, we need to wait at
7333  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
7334  */
7335 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
7336 {
7337         struct intel_crtc_state *crtc_state;
7338         struct intel_crtc *crtc;
7339         struct intel_crtc_state *first_crtc_state = NULL;
7340         struct intel_crtc_state *other_crtc_state = NULL;
7341         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
7342         int i;
7343
7344         /* look at all crtc's that are going to be enabled in during modeset */
7345         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7346                 if (!crtc_state->hw.active ||
7347                     !intel_crtc_needs_modeset(crtc_state))
7348                         continue;
7349
7350                 if (first_crtc_state) {
7351                         other_crtc_state = crtc_state;
7352                         break;
7353                 } else {
7354                         first_crtc_state = crtc_state;
7355                         first_pipe = crtc->pipe;
7356                 }
7357         }
7358
7359         /* No workaround needed? */
7360         if (!first_crtc_state)
7361                 return 0;
7362
7363         /* w/a possibly needed, check how many crtc's are already enabled. */
7364         for_each_intel_crtc(state->base.dev, crtc) {
7365                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7366                 if (IS_ERR(crtc_state))
7367                         return PTR_ERR(crtc_state);
7368
7369                 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
7370
7371                 if (!crtc_state->hw.active ||
7372                     intel_crtc_needs_modeset(crtc_state))
7373                         continue;
7374
7375                 /* 2 or more enabled crtcs means no need for w/a */
7376                 if (enabled_pipe != INVALID_PIPE)
7377                         return 0;
7378
7379                 enabled_pipe = crtc->pipe;
7380         }
7381
7382         if (enabled_pipe != INVALID_PIPE)
7383                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
7384         else if (other_crtc_state)
7385                 other_crtc_state->hsw_workaround_pipe = first_pipe;
7386
7387         return 0;
7388 }
7389
7390 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
7391                            u8 active_pipes)
7392 {
7393         const struct intel_crtc_state *crtc_state;
7394         struct intel_crtc *crtc;
7395         int i;
7396
7397         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7398                 if (crtc_state->hw.active)
7399                         active_pipes |= BIT(crtc->pipe);
7400                 else
7401                         active_pipes &= ~BIT(crtc->pipe);
7402         }
7403
7404         return active_pipes;
7405 }
7406
7407 static int intel_modeset_checks(struct intel_atomic_state *state)
7408 {
7409         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7410
7411         state->modeset = true;
7412
7413         if (IS_HASWELL(dev_priv))
7414                 return hsw_mode_set_planes_workaround(state);
7415
7416         return 0;
7417 }
7418
7419 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
7420                                      struct intel_crtc_state *new_crtc_state)
7421 {
7422         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
7423                 return;
7424
7425         new_crtc_state->uapi.mode_changed = false;
7426         new_crtc_state->update_pipe = true;
7427 }
7428
/*
 * Carry selected bits of the old state over into the new state when a
 * fastset (rather than a full modeset) is performed.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *new_crtc_state)
{
        /*
         * If we're not doing the full modeset we want to
         * keep the current M/N values as they may be
         * sufficiently different to the computed values
         * to cause problems.
         *
         * FIXME: should really copy more fuzzy state here
         */
        new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
        new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
        new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
        new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
7445
7446 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
7447                                           struct intel_crtc *crtc,
7448                                           u8 plane_ids_mask)
7449 {
7450         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7451         struct intel_plane *plane;
7452
7453         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7454                 struct intel_plane_state *plane_state;
7455
7456                 if ((plane_ids_mask & BIT(plane->id)) == 0)
7457                         continue;
7458
7459                 plane_state = intel_atomic_get_plane_state(state, plane);
7460                 if (IS_ERR(plane_state))
7461                         return PTR_ERR(plane_state);
7462         }
7463
7464         return 0;
7465 }
7466
7467 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
7468                                      struct intel_crtc *crtc)
7469 {
7470         const struct intel_crtc_state *old_crtc_state =
7471                 intel_atomic_get_old_crtc_state(state, crtc);
7472         const struct intel_crtc_state *new_crtc_state =
7473                 intel_atomic_get_new_crtc_state(state, crtc);
7474
7475         return intel_crtc_add_planes_to_state(state, crtc,
7476                                               old_crtc_state->enabled_planes |
7477                                               new_crtc_state->enabled_planes);
7478 }
7479
7480 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7481 {
7482         /* See {hsw,vlv,ivb}_plane_ratio() */
7483         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7484                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7485                 IS_IVYBRIDGE(dev_priv);
7486 }
7487
7488 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
7489                                            struct intel_crtc *crtc,
7490                                            struct intel_crtc *other)
7491 {
7492         const struct intel_plane_state *plane_state;
7493         struct intel_plane *plane;
7494         u8 plane_ids = 0;
7495         int i;
7496
7497         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7498                 if (plane->pipe == crtc->pipe)
7499                         plane_ids |= BIT(plane->id);
7500         }
7501
7502         return intel_crtc_add_planes_to_state(state, other, plane_ids);
7503 }
7504
7505 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
7506 {
7507         const struct intel_crtc_state *crtc_state;
7508         struct intel_crtc *crtc;
7509         int i;
7510
7511         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7512                 int ret;
7513
7514                 if (!crtc_state->bigjoiner)
7515                         continue;
7516
7517                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
7518                                                       crtc_state->bigjoiner_linked_crtc);
7519                 if (ret)
7520                         return ret;
7521         }
7522
7523         return 0;
7524 }
7525
/*
 * Run the plane-level atomic checks for @state: first make sure all
 * implicitly affected planes (linked Y/UV planes, bigjoiner mirrors,
 * active-plane-count dependents) are part of the state, then validate
 * each plane. Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	/* Pull in the ICL+ linked (Y/UV) planes before per-plane checks. */
	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	/* Mirror planes onto bigjoiner-linked crtcs as well. */
	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count towards the plane ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only the active plane *count* matters here, not which ones. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
7582
7583 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
7584 {
7585         struct intel_crtc_state *crtc_state;
7586         struct intel_crtc *crtc;
7587         int i;
7588
7589         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7590                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7591                 int ret;
7592
7593                 ret = intel_crtc_atomic_check(state, crtc);
7594                 if (ret) {
7595                         drm_dbg_atomic(&i915->drm,
7596                                        "[CRTC:%d:%s] atomic driver check failed\n",
7597                                        crtc->base.base.id, crtc->base.name);
7598                         return ret;
7599                 }
7600         }
7601
7602         return 0;
7603 }
7604
7605 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
7606                                                u8 transcoders)
7607 {
7608         const struct intel_crtc_state *new_crtc_state;
7609         struct intel_crtc *crtc;
7610         int i;
7611
7612         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7613                 if (new_crtc_state->hw.enable &&
7614                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
7615                     intel_crtc_needs_modeset(new_crtc_state))
7616                         return true;
7617         }
7618
7619         return false;
7620 }
7621
/*
 * Validate/establish the bigjoiner master<->slave link for @crtc.
 * Returns 0 on success, -EINVAL if the would-be slave crtc is already
 * claimed (in use as a normal crtc), or a negative error code from
 * acquiring the slave crtc state.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave_crtc, *master_crtc;

	/* slave being enabled, is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave_crtc = crtc;
		master_crtc = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* The secondary pipe must exist for the bigjoiner pair (see message). */
	slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
	if (!slave_crtc) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
	master_crtc = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave_crtc->base.base.id, slave_crtc->base.name);

	/* Mirror the master's state onto the slave crtc. */
	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave_crtc->base.base.id, slave_crtc->base.name,
		      master_crtc->base.base.id, master_crtc->base.name);
	return -EINVAL;
}
7672
7673 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
7674                                  struct intel_crtc_state *master_crtc_state)
7675 {
7676         struct intel_crtc_state *slave_crtc_state =
7677                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
7678
7679         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
7680         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
7681         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
7682         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
7683 }
7684
7685 /**
7686  * DOC: asynchronous flip implementation
7687  *
7688  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7689  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7690  * Correspondingly, support is currently added for primary plane only.
7691  *
7692  * Async flip can only change the plane surface address, so anything else
7693  * changing is rejected from the intel_atomic_check_async() function.
7694  * Once this check is cleared, flip done interrupt is enabled using
7695  * the intel_crtc_enable_flip_done() function.
7696  *
7697  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
7699  * handler itself. The timestamp and sequence sent during the flip done event
7700  * correspond to the last vblank and have no relation to the actual time when
7701  * the flip done event was sent.
7702  */
/*
 * Validate that this crtc's update qualifies as an async flip: no
 * modeset, crtc active, same set of active planes, and on each affected
 * plane only the surface address may change (see the "asynchronous flip
 * implementation" DOC comment above). Returns 0 if the async flip can
 * proceed, -EINVAL otherwise.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
		return -EINVAL;
	}

	if (!new_crtc_state->hw.active) {
		drm_dbg_kms(&i915->drm, "CRTC inactive\n");
		return -EINVAL;
	}
	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
		drm_dbg_kms(&i915->drm,
			    "Active planes cannot be changed during async flip\n");
		return -EINVAL;
	}

	/* Per-plane checks: only planes of this crtc are considered. */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (new_plane_state->hw.fb->format->num_planes > 1) {
			drm_dbg_kms(&i915->drm,
				    "Planar formats not supported with async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].mapping_stride !=
		    new_plane_state->view.color_plane[0].mapping_stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}

		/* plane decryption is allowed to change only in synchronous flips */
		if (old_plane_state->decrypt != new_plane_state->decrypt)
			return -EINVAL;
	}

	return 0;
}
7829
/*
 * Pull the crtcs linked via bigjoiner into @state, propagate a modeset
 * to the linked crtc where needed, and tear down stale bigjoiner links
 * so they can be re-established later during the check phase.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* Always acquire the linked crtc state, ... */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		/* ... but only force a modeset on it if we need one ourselves. */
		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
7873
7874 /**
7875  * intel_atomic_check - validate state object
7876  * @dev: drm device
7877  * @_state: state to validate
7878  */
7879 static int intel_atomic_check(struct drm_device *dev,
7880                               struct drm_atomic_state *_state)
7881 {
7882         struct drm_i915_private *dev_priv = to_i915(dev);
7883         struct intel_atomic_state *state = to_intel_atomic_state(_state);
7884         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7885         struct intel_crtc *crtc;
7886         int ret, i;
7887         bool any_ms = false;
7888
7889         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7890                                             new_crtc_state, i) {
7891                 if (new_crtc_state->inherited != old_crtc_state->inherited)
7892                         new_crtc_state->uapi.mode_changed = true;
7893         }
7894
7895         intel_vrr_check_modeset(state);
7896
7897         ret = drm_atomic_helper_check_modeset(dev, &state->base);
7898         if (ret)
7899                 goto fail;
7900
7901         ret = intel_bigjoiner_add_affected_crtcs(state);
7902         if (ret)
7903                 goto fail;
7904
7905         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7906                                             new_crtc_state, i) {
7907                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7908                         /* Light copy */
7909                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
7910
7911                         continue;
7912                 }
7913
7914                 if (!new_crtc_state->uapi.enable) {
7915                         if (!new_crtc_state->bigjoiner_slave) {
7916                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
7917                                 any_ms = true;
7918                         }
7919                         continue;
7920                 }
7921
7922                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
7923                 if (ret)
7924                         goto fail;
7925
7926                 ret = intel_modeset_pipe_config(state, new_crtc_state);
7927                 if (ret)
7928                         goto fail;
7929
7930                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
7931                                                    new_crtc_state);
7932                 if (ret)
7933                         goto fail;
7934         }
7935
7936         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7937                                             new_crtc_state, i) {
7938                 if (!intel_crtc_needs_modeset(new_crtc_state))
7939                         continue;
7940
7941                 ret = intel_modeset_pipe_config_late(new_crtc_state);
7942                 if (ret)
7943                         goto fail;
7944
7945                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
7946         }
7947
7948         /**
7949          * Check if fastset is allowed by external dependencies like other
7950          * pipes and transcoders.
7951          *
7952          * Right now it only forces a fullmodeset when the MST master
7953          * transcoder did not changed but the pipe of the master transcoder
7954          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
7955          * in case of port synced crtcs, if one of the synced crtcs
7956          * needs a full modeset, all other synced crtcs should be
7957          * forced a full modeset.
7958          */
7959         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7960                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7961                         continue;
7962
7963                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7964                         enum transcoder master = new_crtc_state->mst_master_transcoder;
7965
7966                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7967                                 new_crtc_state->uapi.mode_changed = true;
7968                                 new_crtc_state->update_pipe = false;
7969                         }
7970                 }
7971
7972                 if (is_trans_port_sync_mode(new_crtc_state)) {
7973                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
7974
7975                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7976                                 trans |= BIT(new_crtc_state->master_transcoder);
7977
7978                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
7979                                 new_crtc_state->uapi.mode_changed = true;
7980                                 new_crtc_state->update_pipe = false;
7981                         }
7982                 }
7983
7984                 if (new_crtc_state->bigjoiner) {
7985                         struct intel_crtc_state *linked_crtc_state =
7986                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
7987
7988                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
7989                                 new_crtc_state->uapi.mode_changed = true;
7990                                 new_crtc_state->update_pipe = false;
7991                         }
7992                 }
7993         }
7994
7995         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7996                                             new_crtc_state, i) {
7997                 if (intel_crtc_needs_modeset(new_crtc_state)) {
7998                         any_ms = true;
7999                         continue;
8000                 }
8001
8002                 if (!new_crtc_state->update_pipe)
8003                         continue;
8004
8005                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
8006         }
8007
8008         if (any_ms && !check_digital_port_conflicts(state)) {
8009                 drm_dbg_kms(&dev_priv->drm,
8010                             "rejecting conflicting digital port configuration\n");
8011                 ret = -EINVAL;
8012                 goto fail;
8013         }
8014
8015         ret = drm_dp_mst_atomic_check(&state->base);
8016         if (ret)
8017                 goto fail;
8018
8019         ret = intel_atomic_check_planes(state);
8020         if (ret)
8021                 goto fail;
8022
8023         ret = intel_compute_global_watermarks(state);
8024         if (ret)
8025                 goto fail;
8026
8027         ret = intel_bw_atomic_check(state);
8028         if (ret)
8029                 goto fail;
8030
8031         ret = intel_cdclk_atomic_check(state, &any_ms);
8032         if (ret)
8033                 goto fail;
8034
8035         if (intel_any_crtc_needs_modeset(state))
8036                 any_ms = true;
8037
8038         if (any_ms) {
8039                 ret = intel_modeset_checks(state);
8040                 if (ret)
8041                         goto fail;
8042
8043                 ret = intel_modeset_calc_cdclk(state);
8044                 if (ret)
8045                         return ret;
8046
8047                 intel_modeset_clear_plls(state);
8048         }
8049
8050         ret = intel_atomic_check_crtcs(state);
8051         if (ret)
8052                 goto fail;
8053
8054         ret = intel_fbc_atomic_check(state);
8055         if (ret)
8056                 goto fail;
8057
8058         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8059                                             new_crtc_state, i) {
8060                 if (new_crtc_state->uapi.async_flip) {
8061                         ret = intel_atomic_check_async(state, crtc);
8062                         if (ret)
8063                                 goto fail;
8064                 }
8065
8066                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
8067                     !new_crtc_state->update_pipe)
8068                         continue;
8069
8070                 intel_dump_pipe_config(new_crtc_state, state,
8071                                        intel_crtc_needs_modeset(new_crtc_state) ?
8072                                        "[modeset]" : "[fastset]");
8073         }
8074
8075         return 0;
8076
8077  fail:
8078         if (ret == -EDEADLK)
8079                 return ret;
8080
8081         /*
8082          * FIXME would probably be nice to know which crtc specifically
8083          * caused the failure, in cases where we can pinpoint it.
8084          */
8085         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8086                                             new_crtc_state, i)
8087                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
8088
8089         return ret;
8090 }
8091
8092 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
8093 {
8094         struct intel_crtc_state *crtc_state;
8095         struct intel_crtc *crtc;
8096         int i, ret;
8097
8098         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
8099         if (ret < 0)
8100                 return ret;
8101
8102         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8103                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8104
8105                 if (mode_changed || crtc_state->update_pipe ||
8106                     crtc_state->uapi.color_mgmt_changed) {
8107                         intel_dsb_prepare(crtc_state);
8108                 }
8109         }
8110
8111         return 0;
8112 }
8113
8114 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
8115                                   struct intel_crtc_state *crtc_state)
8116 {
8117         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8118
8119         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
8120                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8121
8122         if (crtc_state->has_pch_encoder) {
8123                 enum pipe pch_transcoder =
8124                         intel_crtc_pch_transcoder(crtc);
8125
8126                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
8127         }
8128 }
8129
/*
 * Program the pipe-level state that may change across a fastset: pipe
 * source size, panel fitter, linetime watermark and pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Enable, or disable a previously enabled, PCH panel fitter. */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);
}
8172
8173 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
8174                                    struct intel_crtc *crtc)
8175 {
8176         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8177         const struct intel_crtc_state *old_crtc_state =
8178                 intel_atomic_get_old_crtc_state(state, crtc);
8179         const struct intel_crtc_state *new_crtc_state =
8180                 intel_atomic_get_new_crtc_state(state, crtc);
8181         bool modeset = intel_crtc_needs_modeset(new_crtc_state);
8182
8183         /*
8184          * During modesets pipe configuration was programmed as the
8185          * CRTC was enabled.
8186          */
8187         if (!modeset) {
8188                 if (new_crtc_state->uapi.color_mgmt_changed ||
8189                     new_crtc_state->update_pipe)
8190                         intel_color_commit(new_crtc_state);
8191
8192                 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
8193                         bdw_set_pipemisc(new_crtc_state);
8194
8195                 if (new_crtc_state->update_pipe)
8196                         intel_pipe_fastset(old_crtc_state, new_crtc_state);
8197         }
8198
8199         intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
8200
8201         intel_atomic_update_watermarks(state, crtc);
8202 }
8203
8204 static void commit_pipe_post_planes(struct intel_atomic_state *state,
8205                                     struct intel_crtc *crtc)
8206 {
8207         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8208         const struct intel_crtc_state *new_crtc_state =
8209                 intel_atomic_get_new_crtc_state(state, crtc);
8210
8211         /*
8212          * Disable the scaler(s) after the plane(s) so that we don't
8213          * get a catastrophic underrun even if the two operations
8214          * end up happening in two different frames.
8215          */
8216         if (DISPLAY_VER(dev_priv) >= 9 &&
8217             !intel_crtc_needs_modeset(new_crtc_state))
8218                 skl_detach_scalers(new_crtc_state);
8219 }
8220
8221 static void intel_enable_crtc(struct intel_atomic_state *state,
8222                               struct intel_crtc *crtc)
8223 {
8224         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8225         const struct intel_crtc_state *new_crtc_state =
8226                 intel_atomic_get_new_crtc_state(state, crtc);
8227
8228         if (!intel_crtc_needs_modeset(new_crtc_state))
8229                 return;
8230
8231         intel_crtc_update_active_timings(new_crtc_state);
8232
8233         dev_priv->display->crtc_enable(state, crtc);
8234
8235         if (new_crtc_state->bigjoiner_slave)
8236                 return;
8237
8238         /* vblanks work again, re-enable pipe CRC. */
8239         intel_crtc_enable_pipe_crc(crtc);
8240 }
8241
/*
 * Commit the plane/pipe updates for @crtc, for both fastsets and the
 * post-enable portion of a modeset. The register writes proper happen
 * inside the vblank-evasion critical section bounded by
 * intel_pipe_update_start()/intel_pipe_update_end().
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		/* Preload the LUTs ahead of the vblank evasion window. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	intel_fbc_update(state, crtc);

	/* Write the (double buffered) plane registers; armed below. */
	intel_update_planes_on_crtc(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_arm_planes_on_crtc(state, crtc);
	else
		i9xx_arm_planes_on_crtc(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
8292
/*
 * Fully disable @crtc: pipe CRC, the pipe itself, FBC, and drop its
 * shared DPLL reference.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv))
		intel_initial_watermarks(state, crtc);
}
8316
/*
 * Disable every currently active CRTC that is undergoing a modeset.
 * Done in three passes: first shut down the planes everywhere, then
 * disable the slave CRTCs (port sync, MST, bigjoiner), and finally
 * disable everything else that remains.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0; /* bitmask of pipes disabled by the slave pass */
	int i;

	/* Pass 1: turn off the planes on all pipes being modeset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_pre_plane_update(state, crtc);
		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !old_crtc_state->bigjoiner_slave)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}
8374
8375 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8376 {
8377         struct intel_crtc_state *new_crtc_state;
8378         struct intel_crtc *crtc;
8379         int i;
8380
8381         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8382                 if (!new_crtc_state->hw.active)
8383                         continue;
8384
8385                 intel_enable_crtc(state, crtc);
8386                 intel_update_crtc(state, crtc);
8387         }
8388 }
8389
/*
 * skl+ commit_modeset_enables() implementation: orders the per-pipe
 * updates so that the DDB allocation of a pipe being updated never
 * overlaps the allocation any other pipe is still using, which would
 * cause underruns.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	/* DDB entries currently claimed by each pipe, updated as we go */
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer this pipe until its new DDB is conflict-free. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe should have been handled by one of the passes above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
8511
8512 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8513 {
8514         struct intel_atomic_state *state, *next;
8515         struct llist_node *freed;
8516
8517         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8518         llist_for_each_entry_safe(state, next, freed, freed)
8519                 drm_atomic_state_put(&state->base);
8520 }
8521
8522 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8523 {
8524         struct drm_i915_private *dev_priv =
8525                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8526
8527         intel_atomic_helper_free_state(dev_priv);
8528 }
8529
/*
 * Sleep until either the commit's sw fence signals or a modeset-requiring
 * GPU reset (I915_RESET_MODESET) is flagged, whichever happens first.
 * Waits on both wait queues simultaneously so neither event is missed.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both waitqueues before re-checking the conditions. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
8556
8557 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
8558 {
8559         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
8560         struct intel_crtc *crtc;
8561         int i;
8562
8563         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8564                                             new_crtc_state, i)
8565                 intel_dsb_cleanup(old_crtc_state);
8566 }
8567
8568 static void intel_atomic_cleanup_work(struct work_struct *work)
8569 {
8570         struct intel_atomic_state *state =
8571                 container_of(work, struct intel_atomic_state, base.commit_work);
8572         struct drm_i915_private *i915 = to_i915(state->base.dev);
8573
8574         intel_cleanup_dsbs(state);
8575         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
8576         drm_atomic_helper_commit_cleanup_done(&state->base);
8577         drm_atomic_state_put(&state->base);
8578
8579         intel_atomic_helper_free_state(i915);
8580 }
8581
/*
 * For every plane with an RC CCS clear-color fb, read the 8-byte native
 * clear color value back from the fb's clear color plane and cache it
 * in the plane state for later HW programming.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4 x 4-byte per-channel values described above. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
8622
/*
 * The tail of an atomic commit: performs the actual HW programming for
 * @state. Runs inline for blocking commits or from a worker for
 * nonblocking ones; starts by waiting for the commit's sw fence and
 * any dependent commits.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	/* Grab the power domains needed while reprogramming each pipe. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display->commit_modeset_enables(state);

	intel_encoders_update_complete(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_disable_flip_done(state, crtc);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);
	intel_psr_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
8793
/* Work item entry point for nonblocking commits: run the commit tail. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}
8801
8802 static int __i915_sw_fence_call
8803 intel_atomic_commit_ready(struct i915_sw_fence *fence,
8804                           enum i915_sw_fence_notify notify)
8805 {
8806         struct intel_atomic_state *state =
8807                 container_of(fence, struct intel_atomic_state, commit_ready);
8808
8809         switch (notify) {
8810         case FENCE_COMPLETE:
8811                 /* we do blocking waits in the worker, nothing to do here */
8812                 break;
8813         case FENCE_FREE:
8814                 {
8815                         struct intel_atomic_helper *helper =
8816                                 &to_i915(state->base.dev)->atomic_helper;
8817
8818                         if (llist_add(&state->freed, &helper->free_list))
8819                                 schedule_work(&helper->free_work);
8820                         break;
8821                 }
8822         }
8823
8824         return NOTIFY_DONE;
8825 }
8826
8827 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
8828 {
8829         struct intel_plane_state *old_plane_state, *new_plane_state;
8830         struct intel_plane *plane;
8831         int i;
8832
8833         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
8834                                              new_plane_state, i)
8835                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
8836                                         to_intel_frontbuffer(new_plane_state->hw.fb),
8837                                         plane->frontbuffer_bit);
8838 }
8839
/*
 * Top-level atomic commit entry point: prepares the commit, swaps in
 * the new state, and either runs the commit tail inline (blocking) or
 * queues it on the appropriate workqueue (nonblocking).
 *
 * Returns 0 on success or a negative error code; on error all acquired
 * references and the runtime PM wakeref are released.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		/* Unwind: signal the fence and free the DSBs before bailing. */
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Extra reference dropped by intel_atomic_cleanup_work(). */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking modesets must not overtake queued nonblocking ones. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
8930
8931 /**
8932  * intel_plane_destroy - destroy a plane
8933  * @plane: plane to destroy
8934  *
8935  * Common destruction function for all types of planes (primary, cursor,
8936  * sprite).
8937  */
8938 void intel_plane_destroy(struct drm_plane *plane)
8939 {
8940         drm_plane_cleanup(plane);
8941         kfree(to_intel_plane(plane));
8942 }
8943
8944 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
8945 {
8946         struct intel_plane *plane;
8947
8948         for_each_intel_plane(&dev_priv->drm, plane) {
8949                 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
8950                                                               plane->pipe);
8951
8952                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
8953         }
8954 }
8955
8956
8957 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
8958                                       struct drm_file *file)
8959 {
8960         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8961         struct drm_crtc *drmmode_crtc;
8962         struct intel_crtc *crtc;
8963
8964         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
8965         if (!drmmode_crtc)
8966                 return -ENOENT;
8967
8968         crtc = to_intel_crtc(drmmode_crtc);
8969         pipe_from_crtc_id->pipe = crtc->pipe;
8970
8971         return 0;
8972 }
8973
8974 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8975 {
8976         struct drm_device *dev = encoder->base.dev;
8977         struct intel_encoder *source_encoder;
8978         u32 possible_clones = 0;
8979
8980         for_each_intel_encoder(dev, source_encoder) {
8981                 if (encoders_cloneable(encoder, source_encoder))
8982                         possible_clones |= drm_encoder_mask(&source_encoder->base);
8983         }
8984
8985         return possible_clones;
8986 }
8987
8988 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
8989 {
8990         struct drm_device *dev = encoder->base.dev;
8991         struct intel_crtc *crtc;
8992         u32 possible_crtcs = 0;
8993
8994         for_each_intel_crtc(dev, crtc) {
8995                 if (encoder->pipe_mask & BIT(crtc->pipe))
8996                         possible_crtcs |= drm_crtc_mask(&crtc->base);
8997         }
8998
8999         return possible_crtcs;
9000 }
9001
9002 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
9003 {
9004         if (!IS_MOBILE(dev_priv))
9005                 return false;
9006
9007         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
9008                 return false;
9009
9010         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
9011                 return false;
9012
9013         return true;
9014 }
9015
9016 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
9017 {
9018         if (DISPLAY_VER(dev_priv) >= 9)
9019                 return false;
9020
9021         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
9022                 return false;
9023
9024         if (HAS_PCH_LPT_H(dev_priv) &&
9025             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
9026                 return false;
9027
9028         /* DDI E can't be used if DDI A requires 4 lanes */
9029         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
9030                 return false;
9031
9032         if (!dev_priv->vbt.int_crt_support)
9033                 return false;
9034
9035         return true;
9036 }
9037
/*
 * Probe for and register every display output (DDI, DP, HDMI, LVDS, SDVO,
 * CRT, DSI, TV, DVO) that can exist on this platform, then compute each
 * encoder's possible_crtcs/possible_clones masks.
 *
 * The branches are ordered newest platform first; each branch registers
 * exactly the set of ports that generation supports. Within a branch the
 * registration order matters (e.g. LVDS before eDP, SDVO before HDMI/DP
 * on shared pins), so do not reorder calls.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_DG2(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D_XELPD);
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		icl_dsi_init(dev_priv);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		icl_dsi_init(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		vlv_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
	} else if (HAS_DDI(dev_priv)) {
		u32 found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/* Haswell uses DDI functions to detect digital outputs. */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		if (found)
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B/C/D/F presence comes from the SFUSE straps. */
		found = intel_de_read(dev_priv, SFUSE_STRAP);
		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D may be eDP; HDMI D is skipped in that case. */
		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */
		/* (the SDVOB detect bit is deliberately reused below) */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* With all encoders registered, fill in their CRTC/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
9277
9278 static enum drm_mode_status
9279 intel_mode_valid(struct drm_device *dev,
9280                  const struct drm_display_mode *mode)
9281 {
9282         struct drm_i915_private *dev_priv = to_i915(dev);
9283         int hdisplay_max, htotal_max;
9284         int vdisplay_max, vtotal_max;
9285
9286         /*
9287          * Can't reject DBLSCAN here because Xorg ddxen can add piles
9288          * of DBLSCAN modes to the output's mode list when they detect
9289          * the scaling mode property on the connector. And they don't
9290          * ask the kernel to validate those modes in any way until
9291          * modeset time at which point the client gets a protocol error.
9292          * So in order to not upset those clients we silently ignore the
9293          * DBLSCAN flag on such connectors. For other connectors we will
9294          * reject modes with the DBLSCAN flag in encoder->compute_config().
9295          * And we always reject DBLSCAN modes in connector->mode_valid()
9296          * as we never want such modes on the connector's mode list.
9297          */
9298
9299         if (mode->vscan > 1)
9300                 return MODE_NO_VSCAN;
9301
9302         if (mode->flags & DRM_MODE_FLAG_HSKEW)
9303                 return MODE_H_ILLEGAL;
9304
9305         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
9306                            DRM_MODE_FLAG_NCSYNC |
9307                            DRM_MODE_FLAG_PCSYNC))
9308                 return MODE_HSYNC;
9309
9310         if (mode->flags & (DRM_MODE_FLAG_BCAST |
9311                            DRM_MODE_FLAG_PIXMUX |
9312                            DRM_MODE_FLAG_CLKDIV2))
9313                 return MODE_BAD;
9314
9315         /* Transcoder timing limits */
9316         if (DISPLAY_VER(dev_priv) >= 11) {
9317                 hdisplay_max = 16384;
9318                 vdisplay_max = 8192;
9319                 htotal_max = 16384;
9320                 vtotal_max = 8192;
9321         } else if (DISPLAY_VER(dev_priv) >= 9 ||
9322                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
9323                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
9324                 vdisplay_max = 4096;
9325                 htotal_max = 8192;
9326                 vtotal_max = 8192;
9327         } else if (DISPLAY_VER(dev_priv) >= 3) {
9328                 hdisplay_max = 4096;
9329                 vdisplay_max = 4096;
9330                 htotal_max = 8192;
9331                 vtotal_max = 8192;
9332         } else {
9333                 hdisplay_max = 2048;
9334                 vdisplay_max = 2048;
9335                 htotal_max = 4096;
9336                 vtotal_max = 4096;
9337         }
9338
9339         if (mode->hdisplay > hdisplay_max ||
9340             mode->hsync_start > htotal_max ||
9341             mode->hsync_end > htotal_max ||
9342             mode->htotal > htotal_max)
9343                 return MODE_H_ILLEGAL;
9344
9345         if (mode->vdisplay > vdisplay_max ||
9346             mode->vsync_start > vtotal_max ||
9347             mode->vsync_end > vtotal_max ||
9348             mode->vtotal > vtotal_max)
9349                 return MODE_V_ILLEGAL;
9350
9351         if (DISPLAY_VER(dev_priv) >= 5) {
9352                 if (mode->hdisplay < 64 ||
9353                     mode->htotal - mode->hdisplay < 32)
9354                         return MODE_H_ILLEGAL;
9355
9356                 if (mode->vtotal - mode->vdisplay < 5)
9357                         return MODE_V_ILLEGAL;
9358         } else {
9359                 if (mode->htotal - mode->hdisplay < 32)
9360                         return MODE_H_ILLEGAL;
9361
9362                 if (mode->vtotal - mode->vdisplay < 3)
9363                         return MODE_V_ILLEGAL;
9364         }
9365
9366         /*
9367          * Cantiga+ cannot handle modes with a hsync front porch of 0.
9368          * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
9369          */
9370         if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
9371             mode->hsync_start == mode->hdisplay)
9372                 return MODE_H_ILLEGAL;
9373
9374         return MODE_OK;
9375 }
9376
9377 enum drm_mode_status
9378 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9379                                 const struct drm_display_mode *mode,
9380                                 bool bigjoiner)
9381 {
9382         int plane_width_max, plane_height_max;
9383
9384         /*
9385          * intel_mode_valid() should be
9386          * sufficient on older platforms.
9387          */
9388         if (DISPLAY_VER(dev_priv) < 9)
9389                 return MODE_OK;
9390
9391         /*
9392          * Most people will probably want a fullscreen
9393          * plane so let's not advertize modes that are
9394          * too big for that.
9395          */
9396         if (DISPLAY_VER(dev_priv) >= 11) {
9397                 plane_width_max = 5120 << bigjoiner;
9398                 plane_height_max = 4320;
9399         } else {
9400                 plane_width_max = 5120;
9401                 plane_height_max = 4096;
9402         }
9403
9404         if (mode->hdisplay > plane_width_max)
9405                 return MODE_H_ILLEGAL;
9406
9407         if (mode->vdisplay > plane_height_max)
9408                 return MODE_V_ILLEGAL;
9409
9410         return MODE_OK;
9411 }
9412
/* Top-level KMS mode-config vtable: fb creation, mode validation and the
 * atomic check/commit entry points handed to the DRM core. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
9424
/* Per-generation display vtables, selected in intel_init_display_hooks(). */

/* SKL+ (display ver >= 9): DDI-style CRTC hooks with SKL plane/wm code. */
static const struct drm_i915_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};

/* HSW/BDW: DDI-style CRTC hooks with pre-SKL plane readout. */
static const struct drm_i915_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* ILK/SNB/IVB: PCH split platforms. */
static const struct drm_i915_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* VLV/CHV: gmch-style pipes with their own enable sequence. */
static const struct drm_i915_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* Everything older: classic i9xx pipes. */
static const struct drm_i915_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9464
9465 /**
9466  * intel_init_display_hooks - initialize the display modesetting hooks
9467  * @dev_priv: device private
9468  */
9469 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
9470 {
9471         if (!HAS_DISPLAY(dev_priv))
9472                 return;
9473
9474         intel_init_cdclk_hooks(dev_priv);
9475         intel_audio_hooks_init(dev_priv);
9476
9477         intel_dpll_init_clock_hook(dev_priv);
9478
9479         if (DISPLAY_VER(dev_priv) >= 9) {
9480                 dev_priv->display = &skl_display_funcs;
9481         } else if (HAS_DDI(dev_priv)) {
9482                 dev_priv->display = &ddi_display_funcs;
9483         } else if (HAS_PCH_SPLIT(dev_priv)) {
9484                 dev_priv->display = &pch_split_display_funcs;
9485         } else if (IS_CHERRYVIEW(dev_priv) ||
9486                    IS_VALLEYVIEW(dev_priv)) {
9487                 dev_priv->display = &vlv_display_funcs;
9488         } else {
9489                 dev_priv->display = &i9xx_display_funcs;
9490         }
9491
9492         intel_fdi_init_hook(dev_priv);
9493 }
9494
9495 void intel_modeset_init_hw(struct drm_i915_private *i915)
9496 {
9497         struct intel_cdclk_state *cdclk_state;
9498
9499         if (!HAS_DISPLAY(i915))
9500                 return;
9501
9502         cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
9503
9504         intel_update_cdclk(i915);
9505         intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
9506         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
9507 }
9508
9509 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
9510 {
9511         struct drm_plane *plane;
9512         struct intel_crtc *crtc;
9513
9514         for_each_intel_crtc(state->dev, crtc) {
9515                 struct intel_crtc_state *crtc_state;
9516
9517                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
9518                 if (IS_ERR(crtc_state))
9519                         return PTR_ERR(crtc_state);
9520
9521                 if (crtc_state->hw.active) {
9522                         /*
9523                          * Preserve the inherited flag to avoid
9524                          * taking the full modeset path.
9525                          */
9526                         crtc_state->inherited = true;
9527                 }
9528         }
9529
9530         drm_for_each_plane(plane, state->dev) {
9531                 struct drm_plane_state *plane_state;
9532
9533                 plane_state = drm_atomic_get_plane_state(state, plane);
9534                 if (IS_ERR(plane_state))
9535                         return PTR_ERR(plane_state);
9536         }
9537
9538         return 0;
9539 }
9540
9541 /*
9542  * Calculate what we think the watermarks should be for the state we've read
9543  * out of the hardware and then immediately program those watermarks so that
9544  * we ensure the hardware settings match our internal state.
9545  *
9546  * We can calculate what we think WM's should be by creating a duplicate of the
9547  * current state (which was constructed during hardware readout) and running it
9548  * through the atomic check code to calculate new watermark values in the
9549  * state object.
9550  */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->wm_disp->optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

	/* Deadlock backoff restarts the whole computation from here. */
retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	/* Pull all CRTCs/planes into the state, then compute watermarks. */
	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		intel_optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Standard modeset-lock deadlock handling: clear, back off, retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
9626
/*
 * Perform the first atomic commit after hardware state readout, forcing
 * all active planes to recompute their derived state so that the first
 * real modeset from userspace doesn't trip assert_plane warnings.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * If any encoder reports that the inherited hw state
			 * can't be kept as-is, pull its connectors into the
			 * state so the commit can fix things up.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On modeset lock contention drop all locks and retry from scratch */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
9705
9706 static void intel_mode_config_init(struct drm_i915_private *i915)
9707 {
9708         struct drm_mode_config *mode_config = &i915->drm.mode_config;
9709
9710         drm_mode_config_init(&i915->drm);
9711         INIT_LIST_HEAD(&i915->global_obj_list);
9712
9713         mode_config->min_width = 0;
9714         mode_config->min_height = 0;
9715
9716         mode_config->preferred_depth = 24;
9717         mode_config->prefer_shadow = 1;
9718
9719         mode_config->funcs = &intel_mode_funcs;
9720
9721         mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
9722
9723         /*
9724          * Maximum framebuffer dimensions, chosen to match
9725          * the maximum render engine surface size on gen4+.
9726          */
9727         if (DISPLAY_VER(i915) >= 7) {
9728                 mode_config->max_width = 16384;
9729                 mode_config->max_height = 16384;
9730         } else if (DISPLAY_VER(i915) >= 4) {
9731                 mode_config->max_width = 8192;
9732                 mode_config->max_height = 8192;
9733         } else if (DISPLAY_VER(i915) == 3) {
9734                 mode_config->max_width = 4096;
9735                 mode_config->max_height = 4096;
9736         } else {
9737                 mode_config->max_width = 2048;
9738                 mode_config->max_height = 2048;
9739         }
9740
9741         if (IS_I845G(i915) || IS_I865G(i915)) {
9742                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
9743                 mode_config->cursor_height = 1023;
9744         } else if (IS_I830(i915) || IS_I85X(i915) ||
9745                    IS_I915G(i915) || IS_I915GM(i915)) {
9746                 mode_config->cursor_width = 64;
9747                 mode_config->cursor_height = 64;
9748         } else {
9749                 mode_config->cursor_width = 256;
9750                 mode_config->cursor_height = 256;
9751         }
9752 }
9753
/*
 * Counterpart to intel_mode_config_init(): release our global atomic
 * objects first, then let drm core tear down the mode config.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
9759
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	/* Headless SKU: nothing further to initialize here */
	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	/* Ordered wq for modesets, high-prio unbound wq for page flips */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	/* All three global-state objects share the same error unwind path */
	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
9830
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create one crtc per hardware pipe */
	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out the BIOS-programmed hardware state under all modeset locks */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	/* Reconstruct framebuffers for the planes the BIOS left enabled */
	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
9907
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
9940
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 VGA-timing mode.
 * Used for the i830 "both pipes always on" quirk; programs the DPLL and
 * pipe timing registers directly, bypassing the atomic machinery.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check that the divider values above produce ~25.2 MHz */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Fixed 640x480 timings (htotal 800, vtotal 525) */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out */
	intel_wait_for_pipe_scanline_moving(crtc);
}
10013
/*
 * Counterpart to i830_enable_pipe(): shut down a force-enabled pipe and
 * its DPLL. All planes and cursors must already be off; the WARNs below
 * assert that before the pipe is disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Pipe must be fully stopped before the DPLL can be turned off */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
10040
10041 static void
10042 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
10043 {
10044         struct intel_crtc *crtc;
10045
10046         if (DISPLAY_VER(dev_priv) >= 4)
10047                 return;
10048
10049         for_each_intel_crtc(&dev_priv->drm, crtc) {
10050                 struct intel_plane *plane =
10051                         to_intel_plane(crtc->base.primary);
10052                 struct intel_crtc *plane_crtc;
10053                 enum pipe pipe;
10054
10055                 if (!plane->get_hw_state(plane, &pipe))
10056                         continue;
10057
10058                 if (pipe == crtc->pipe)
10059                         continue;
10060
10061                 drm_dbg_kms(&dev_priv->drm,
10062                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
10063                             plane->base.base.id, plane->base.name);
10064
10065                 plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
10066                 intel_plane_disable_noatomic(plane_crtc, plane);
10067         }
10068 }
10069
10070 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
10071 {
10072         struct drm_device *dev = crtc->base.dev;
10073         struct intel_encoder *encoder;
10074
10075         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
10076                 return true;
10077
10078         return false;
10079 }
10080
10081 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
10082 {
10083         struct drm_device *dev = encoder->base.dev;
10084         struct intel_connector *connector;
10085
10086         for_each_connector_on_encoder(dev, &encoder->base, connector)
10087                 return connector;
10088
10089         return NULL;
10090 }
10091
10092 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
10093                               enum pipe pch_transcoder)
10094 {
10095         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
10096                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
10097 }
10098
/*
 * Clear any non-default frame start delay the BIOS may have left in the
 * CPU transcoder (and PCH transcoder, if used) and program our own
 * dev_priv->framestart_delay value instead.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: delay lives in the per-transcoder chicken register */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms: delay lives in PIPECONF */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: delay is in the PCH transcoder config register */
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT+: delay is in the transcoder chicken register */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
10149
/*
 * Bring a crtc left in an arbitrary state by the BIOS into a state our
 * driver can cope with: strip extra planes, clear debug frame delays,
 * and turn the pipe off entirely if it has no active encoders.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/* Disable any background color/etc. set by the BIOS */
		intel_color_commit(crtc_state);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
10211
10212 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
10213 {
10214         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10215
10216         /*
10217          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
10218          * the hardware when a high res displays plugged in. DPLL P
10219          * divider is zero, and the pipe timings are bonkers. We'll
10220          * try to disable everything in that case.
10221          *
10222          * FIXME would be nice to be able to sanitize this state
10223          * without several WARNs, but for now let's take the easy
10224          * road.
10225          */
10226         return IS_SANDYBRIDGE(dev_priv) &&
10227                 crtc_state->hw.active &&
10228                 crtc_state->shared_dpll &&
10229                 crtc_state->port_clock == 0;
10230 }
10231
/*
 * Fix up an encoder whose readout state is inconsistent: an encoder with
 * active connectors but no active pipe is manually disabled and its
 * crtc/connector links are clamped to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a crtc with bogus SNB BIOS DPLL programming as inactive */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
10302
10303 /* FIXME read out full plane state for all planes */
10304 static void readout_plane_state(struct drm_i915_private *dev_priv)
10305 {
10306         struct intel_plane *plane;
10307         struct intel_crtc *crtc;
10308
10309         for_each_intel_plane(&dev_priv->drm, plane) {
10310                 struct intel_plane_state *plane_state =
10311                         to_intel_plane_state(plane->base.state);
10312                 struct intel_crtc_state *crtc_state;
10313                 enum pipe pipe = PIPE_A;
10314                 bool visible;
10315
10316                 visible = plane->get_hw_state(plane, &pipe);
10317
10318                 crtc = intel_crtc_for_pipe(dev_priv, pipe);
10319                 crtc_state = to_intel_crtc_state(crtc->base.state);
10320
10321                 intel_set_plane_visible(crtc_state, plane_state, visible);
10322
10323                 drm_dbg_kms(&dev_priv->drm,
10324                             "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
10325                             plane->base.base.id, plane->base.name,
10326                             enableddisabled(visible), pipe_name(pipe));
10327         }
10328
10329         for_each_intel_crtc(&dev_priv->drm, crtc) {
10330                 struct intel_crtc_state *crtc_state =
10331                         to_intel_crtc_state(crtc->base.state);
10332
10333                 fixup_plane_bitmasks(crtc_state);
10334         }
10335 }
10336
10337 static void intel_modeset_readout_hw_state(struct drm_device *dev)
10338 {
10339         struct drm_i915_private *dev_priv = to_i915(dev);
10340         struct intel_cdclk_state *cdclk_state =
10341                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
10342         struct intel_dbuf_state *dbuf_state =
10343                 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
10344         enum pipe pipe;
10345         struct intel_crtc *crtc;
10346         struct intel_encoder *encoder;
10347         struct intel_connector *connector;
10348         struct drm_connector_list_iter conn_iter;
10349         u8 active_pipes = 0;
10350
10351         for_each_intel_crtc(dev, crtc) {
10352                 struct intel_crtc_state *crtc_state =
10353                         to_intel_crtc_state(crtc->base.state);
10354
10355                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
10356                 intel_crtc_free_hw_state(crtc_state);
10357                 intel_crtc_state_reset(crtc_state, crtc);
10358
10359                 intel_crtc_get_pipe_config(crtc_state);
10360
10361                 crtc_state->hw.enable = crtc_state->hw.active;
10362
10363                 crtc->base.enabled = crtc_state->hw.enable;
10364                 crtc->active = crtc_state->hw.active;
10365
10366                 if (crtc_state->hw.active)
10367                         active_pipes |= BIT(crtc->pipe);
10368
10369                 drm_dbg_kms(&dev_priv->drm,
10370                             "[CRTC:%d:%s] hw state readout: %s\n",
10371                             crtc->base.base.id, crtc->base.name,
10372                             enableddisabled(crtc_state->hw.active));
10373         }
10374
10375         cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
10376
10377         readout_plane_state(dev_priv);
10378
10379         for_each_intel_encoder(dev, encoder) {
10380                 struct intel_crtc_state *crtc_state = NULL;
10381
10382                 pipe = 0;
10383
10384                 if (encoder->get_hw_state(encoder, &pipe)) {
10385                         crtc = intel_crtc_for_pipe(dev_priv, pipe);
10386                         crtc_state = to_intel_crtc_state(crtc->base.state);
10387
10388                         encoder->base.crtc = &crtc->base;
10389                         intel_encoder_get_config(encoder, crtc_state);
10390
10391                         /* read out to slave crtc as well for bigjoiner */
10392                         if (crtc_state->bigjoiner) {
10393                                 /* encoder should read be linked to bigjoiner master */
10394                                 WARN_ON(crtc_state->bigjoiner_slave);
10395
10396                                 crtc = crtc_state->bigjoiner_linked_crtc;
10397                                 crtc_state = to_intel_crtc_state(crtc->base.state);
10398                                 intel_encoder_get_config(encoder, crtc_state);
10399                         }
10400                 } else {
10401                         encoder->base.crtc = NULL;
10402                 }
10403
10404                 if (encoder->sync_state)
10405                         encoder->sync_state(encoder, crtc_state);
10406
10407                 drm_dbg_kms(&dev_priv->drm,
10408                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10409                             encoder->base.base.id, encoder->base.name,
10410                             enableddisabled(encoder->base.crtc),
10411                             pipe_name(pipe));
10412         }
10413
10414         intel_dpll_readout_hw_state(dev_priv);
10415
10416         drm_connector_list_iter_begin(dev, &conn_iter);
10417         for_each_intel_connector_iter(connector, &conn_iter) {
10418                 if (connector->get_hw_state(connector)) {
10419                         struct intel_crtc_state *crtc_state;
10420                         struct intel_crtc *crtc;
10421
10422                         connector->base.dpms = DRM_MODE_DPMS_ON;
10423
10424                         encoder = intel_attached_encoder(connector);
10425                         connector->base.encoder = &encoder->base;
10426
10427                         crtc = to_intel_crtc(encoder->base.crtc);
10428                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
10429
10430                         if (crtc_state && crtc_state->hw.active) {
10431                                 /*
10432                                  * This has to be done during hardware readout
10433                                  * because anything calling .crtc_disable may
10434                                  * rely on the connector_mask being accurate.
10435                                  */
10436                                 crtc_state->uapi.connector_mask |=
10437                                         drm_connector_mask(&connector->base);
10438                                 crtc_state->uapi.encoder_mask |=
10439                                         drm_encoder_mask(&encoder->base);
10440                         }
10441                 } else {
10442                         connector->base.dpms = DRM_MODE_DPMS_OFF;
10443                         connector->base.encoder = NULL;
10444                 }
10445                 drm_dbg_kms(&dev_priv->drm,
10446                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
10447                             connector->base.base.id, connector->base.name,
10448                             enableddisabled(connector->base.encoder));
10449         }
10450         drm_connector_list_iter_end(&conn_iter);
10451
10452         for_each_intel_crtc(dev, crtc) {
10453                 struct intel_bw_state *bw_state =
10454                         to_intel_bw_state(dev_priv->bw_obj.state);
10455                 struct intel_crtc_state *crtc_state =
10456                         to_intel_crtc_state(crtc->base.state);
10457                 struct intel_plane *plane;
10458                 int min_cdclk = 0;
10459
10460                 if (crtc_state->hw.active) {
10461                         /*
10462                          * The initial mode needs to be set in order to keep
10463                          * the atomic core happy. It wants a valid mode if the
10464                          * crtc's enabled, so we do the above call.
10465                          *
10466                          * But we don't set all the derived state fully, hence
10467                          * set a flag to indicate that a full recalculation is
10468                          * needed on the next commit.
10469                          */
10470                         crtc_state->inherited = true;
10471
10472                         intel_crtc_update_active_timings(crtc_state);
10473
10474                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
10475                 }
10476
10477                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
10478                         const struct intel_plane_state *plane_state =
10479                                 to_intel_plane_state(plane->base.state);
10480
10481                         /*
10482                          * FIXME don't have the fb yet, so can't
10483                          * use intel_plane_data_rate() :(
10484                          */
10485                         if (plane_state->uapi.visible)
10486                                 crtc_state->data_rate[plane->id] =
10487                                         4 * crtc_state->pixel_rate;
10488                         /*
10489                          * FIXME don't have the fb yet, so can't
10490                          * use plane->min_cdclk() :(
10491                          */
10492                         if (plane_state->uapi.visible && plane->min_cdclk) {
10493                                 if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
10494                                         crtc_state->min_cdclk[plane->id] =
10495                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
10496                                 else
10497                                         crtc_state->min_cdclk[plane->id] =
10498                                                 crtc_state->pixel_rate;
10499                         }
10500                         drm_dbg_kms(&dev_priv->drm,
10501                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
10502                                     plane->base.base.id, plane->base.name,
10503                                     crtc_state->min_cdclk[plane->id]);
10504                 }
10505
10506                 if (crtc_state->hw.active) {
10507                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
10508                         if (drm_WARN_ON(dev, min_cdclk < 0))
10509                                 min_cdclk = 0;
10510                 }
10511
10512                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
10513                 cdclk_state->min_voltage_level[crtc->pipe] =
10514                         crtc_state->min_voltage_level;
10515
10516                 intel_bw_crtc_update(bw_state, crtc_state);
10517
10518                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
10519         }
10520 }
10521
10522 static void
10523 get_encoder_power_domains(struct drm_i915_private *dev_priv)
10524 {
10525         struct intel_encoder *encoder;
10526
10527         for_each_intel_encoder(&dev_priv->drm, encoder) {
10528                 struct intel_crtc_state *crtc_state;
10529
10530                 if (!encoder->get_power_domains)
10531                         continue;
10532
10533                 /*
10534                  * MST-primary and inactive encoders don't have a crtc state
10535                  * and neither of these require any power domain references.
10536                  */
10537                 if (!encoder->base.crtc)
10538                         continue;
10539
10540                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10541                 encoder->get_power_domains(encoder, crtc_state);
10542         }
10543 }
10544
/*
 * Apply display workarounds that must be in place before the initial
 * modeset/plane disable touches the hardware.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
10573
10574 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
10575                                        enum port port, i915_reg_t hdmi_reg)
10576 {
10577         u32 val = intel_de_read(dev_priv, hdmi_reg);
10578
10579         if (val & SDVO_ENABLE ||
10580             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
10581                 return;
10582
10583         drm_dbg_kms(&dev_priv->drm,
10584                     "Sanitizing transcoder select for HDMI %c\n",
10585                     port_name(port));
10586
10587         val &= ~SDVO_PIPE_SEL_MASK;
10588         val |= SDVO_PIPE_SEL(PIPE_A);
10589
10590         intel_de_write(dev_priv, hdmi_reg, val);
10591 }
10592
10593 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
10594                                      enum port port, i915_reg_t dp_reg)
10595 {
10596         u32 val = intel_de_read(dev_priv, dp_reg);
10597
10598         if (val & DP_PORT_EN ||
10599             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
10600                 return;
10601
10602         drm_dbg_kms(&dev_priv->drm,
10603                     "Sanitizing transcoder select for DP %c\n",
10604                     port_name(port));
10605
10606         val &= ~DP_PIPE_SEL_MASK;
10607         val |= DP_PIPE_SEL(PIPE_A);
10608
10609         intel_de_write(dev_priv, dp_reg, val);
10610 }
10611
/*
 * Sanitize the transcoder select bits of all (presumed disabled) PCH
 * DP/HDMI ports so later state asserts don't trip over BIOS leftovers.
 */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
10634
/*
 * Scan out the current hw modeset state, and sanitize it to the current
 * state: read back what the hardware (as left by BIOS/GOP or a previous
 * driver) has enabled, then fix up anything our software state can't
 * represent. Ordering within this function is load-bearing; see the
 * inline comments.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep the display powered for the whole readout + sanitize pass. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Workarounds first, then raw hardware state readout. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	/* Sanitize crtcs only after planes/encoders have been dealt with. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Watermark state readout (with sanitation where implemented). */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * After sanitation no crtc should need any additional power
	 * domain references; warn and immediately drop any that do
	 * show up here.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
10713
10714 void intel_display_resume(struct drm_device *dev)
10715 {
10716         struct drm_i915_private *dev_priv = to_i915(dev);
10717         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
10718         struct drm_modeset_acquire_ctx ctx;
10719         int ret;
10720
10721         if (!HAS_DISPLAY(dev_priv))
10722                 return;
10723
10724         dev_priv->modeset_restore_state = NULL;
10725         if (state)
10726                 state->acquire_ctx = &ctx;
10727
10728         drm_modeset_acquire_init(&ctx, 0);
10729
10730         while (1) {
10731                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
10732                 if (ret != -EDEADLK)
10733                         break;
10734
10735                 drm_modeset_backoff(&ctx);
10736         }
10737
10738         if (!ret)
10739                 ret = __intel_display_resume(dev, state, &ctx);
10740
10741         intel_enable_ipc(dev_priv);
10742         drm_modeset_drop_locks(&ctx);
10743         drm_modeset_acquire_fini(&ctx);
10744
10745         if (ret)
10746                 drm_err(&dev_priv->drm,
10747                         "Restoring old state failed with %i\n", ret);
10748         if (state)
10749                 drm_atomic_state_put(state);
10750 }
10751
10752 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
10753 {
10754         struct intel_connector *connector;
10755         struct drm_connector_list_iter conn_iter;
10756
10757         /* Kill all the work that may have been queued by hpd. */
10758         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
10759         for_each_intel_connector_iter(connector, &conn_iter) {
10760                 if (connector->modeset_retry_work.func)
10761                         cancel_work_sync(&connector->modeset_retry_work);
10762                 if (connector->hdcp.shim) {
10763                         cancel_delayed_work_sync(&connector->hdcp.check_work);
10764                         cancel_work_sync(&connector->hdcp.prop_work);
10765                 }
10766         }
10767         drm_connector_list_iter_end(&conn_iter);
10768 }
10769
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Drain any commits still in flight while irqs are still around. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* All queued atomic state frees should have been processed by now. */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
10782
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Already flushed in intel_modeset_driver_remove() (part #1). */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup(i915);
}
10825
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	/* Releases the VBT/opregion data taken over at probe time. */
	intel_bios_driver_remove(i915);
}
10837
10838 bool intel_modeset_probe_defer(struct pci_dev *pdev)
10839 {
10840         struct drm_privacy_screen *privacy_screen;
10841
10842         /*
10843          * apple-gmux is needed on dual GPU MacBook Pro
10844          * to probe the panel if we're the inactive GPU.
10845          */
10846         if (vga_switcheroo_client_probe_defer(pdev))
10847                 return true;
10848
10849         /* If the LCD panel has a privacy-screen, wait for it */
10850         privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
10851         if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
10852                 return true;
10853
10854         drm_privacy_screen_put(privacy_screen);
10855
10856         return false;
10857 }
10858
/*
 * Register the userspace-facing parts of the display driver: debugfs,
 * opregion/ACPI video, audio, the initial fbdev config and hotplug
 * polling. Must be called after outputs have been probed.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
10889
/*
 * Unregister the userspace-facing parts of the display driver, in
 * roughly the reverse order of intel_display_driver_register().
 */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}