drm/i915/display: fix comment on skl straps
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_damage_helper.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_display_debugfs.h"
49 #include "display/intel_dp.h"
50 #include "display/intel_dp_mst.h"
51 #include "display/intel_dpll.h"
52 #include "display/intel_dpll_mgr.h"
53 #include "display/intel_dsi.h"
54 #include "display/intel_dvo.h"
55 #include "display/intel_gmbus.h"
56 #include "display/intel_hdmi.h"
57 #include "display/intel_lvds.h"
58 #include "display/intel_sdvo.h"
59 #include "display/intel_tv.h"
60 #include "display/intel_vdsc.h"
61 #include "display/intel_vrr.h"
62
63 #include "gem/i915_gem_object.h"
64
65 #include "gt/intel_rps.h"
66
67 #include "i915_drv.h"
68 #include "i915_trace.h"
69 #include "intel_acpi.h"
70 #include "intel_atomic.h"
71 #include "intel_atomic_plane.h"
72 #include "intel_bw.h"
73 #include "intel_cdclk.h"
74 #include "intel_color.h"
75 #include "intel_crtc.h"
76 #include "intel_csr.h"
77 #include "intel_display_types.h"
78 #include "intel_dp_link_training.h"
79 #include "intel_fbc.h"
80 #include "intel_fdi.h"
81 #include "intel_fbdev.h"
82 #include "intel_fifo_underrun.h"
83 #include "intel_frontbuffer.h"
84 #include "intel_hdcp.h"
85 #include "intel_hotplug.h"
86 #include "intel_overlay.h"
87 #include "intel_pipe_crc.h"
88 #include "intel_pm.h"
89 #include "intel_pps.h"
90 #include "intel_psr.h"
91 #include "intel_quirks.h"
92 #include "intel_sideband.h"
93 #include "intel_sprite.h"
94 #include "intel_tc.h"
95 #include "intel_vga.h"
96 #include "i9xx_plane.h"
97 #include "skl_scaler.h"
98 #include "skl_universal_plane.h"
99
100 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
101                                 struct intel_crtc_state *pipe_config);
102 static void ilk_pch_clock_get(struct intel_crtc *crtc,
103                               struct intel_crtc_state *pipe_config);
104
105 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
106                                   struct drm_i915_gem_object *obj,
107                                   struct drm_mode_fb_cmd2 *mode_cmd);
108 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
109 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
110 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
111                                          const struct intel_link_m_n *m_n,
112                                          const struct intel_link_m_n *m2_n2);
113 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
114 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
115 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
116 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
117 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
118 static void intel_modeset_setup_hw_state(struct drm_device *dev,
119                                          struct drm_modeset_acquire_ctx *ctx);
120
121 /* returns HPLL frequency in kHz */
122 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
123 {
124         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
125
126         /* Obtain SKU information */
127         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
128                 CCK_FUSE_HPLL_FREQ_MASK;
129
130         return vco_freq[hpll_freq] * 1000;
131 }
132
133 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
134                       const char *name, u32 reg, int ref_freq)
135 {
136         u32 val;
137         int divider;
138
139         val = vlv_cck_read(dev_priv, reg);
140         divider = val & CCK_FREQUENCY_VALUES;
141
142         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
143                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
144                  "%s change in progress\n", name);
145
146         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
147 }
148
149 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
150                            const char *name, u32 reg)
151 {
152         int hpll;
153
154         vlv_cck_get(dev_priv);
155
156         if (dev_priv->hpll_freq == 0)
157                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
158
159         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
160
161         vlv_cck_put(dev_priv);
162
163         return hpll;
164 }
165
166 static void intel_update_czclk(struct drm_i915_private *dev_priv)
167 {
168         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
169                 return;
170
171         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
172                                                       CCK_CZ_CLOCK_CONTROL);
173
174         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
175                 dev_priv->czclk_freq);
176 }
177
178 /* WA Display #0827: Gen9:all */
179 static void
180 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
181 {
182         if (enable)
183                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
184                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
185         else
186                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
187                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
188 }
189
190 /* Wa_2006604312:icl,ehl */
191 static void
192 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
193                        bool enable)
194 {
195         if (enable)
196                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
197                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
198         else
199                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
200                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
201 }
202
/* A port sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
208
/* A port sync master drives at least one slave transcoder. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
214
/* True if this crtc participates in port sync, as either master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
221
222 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
223                                     enum pipe pipe)
224 {
225         i915_reg_t reg = PIPEDSL(pipe);
226         u32 line1, line2;
227         u32 line_mask;
228
229         if (IS_GEN(dev_priv, 2))
230                 line_mask = DSL_LINEMASK_GEN2;
231         else
232                 line_mask = DSL_LINEMASK_GEN3;
233
234         line1 = intel_de_read(dev_priv, reg) & line_mask;
235         msleep(5);
236         line2 = intel_de_read(dev_priv, reg) & line_mask;
237
238         return line1 != line2;
239 }
240
/*
 * Poll until the pipe's scanline counter matches the expected moving
 * state: @state == true waits for it to start advancing, false waits
 * for it to stop. Logs an error if it hasn't settled within 100 ms.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
252
/* Wait for the pipe's scanline counter to stop advancing (pipe off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
257
/* Wait for the pipe's scanline counter to start advancing (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
262
/*
 * Wait for a just-disabled pipe to actually shut down.
 *
 * On gen4+ the PIPECONF active bit reports when the pipe has turned
 * off; older hardware lacks such a status bit, so there we instead wait
 * for the scanline counter to stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
282
283 /* Only for pre-ILK configs */
284 void assert_pll(struct drm_i915_private *dev_priv,
285                 enum pipe pipe, bool state)
286 {
287         u32 val;
288         bool cur_state;
289
290         val = intel_de_read(dev_priv, DPLL(pipe));
291         cur_state = !!(val & DPLL_VCO_ENABLE);
292         I915_STATE_WARN(cur_state != state,
293              "PLL state assertion failure (expected %s, current %s)\n",
294                         onoff(state), onoff(cur_state));
295 }
296
297 /* XXX: the dsi pll is shared between MIPI DSI ports */
298 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
299 {
300         u32 val;
301         bool cur_state;
302
303         vlv_cck_get(dev_priv);
304         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
305         vlv_cck_put(dev_priv);
306
307         cur_state = val & DSI_PLL_VCO_EN;
308         I915_STATE_WARN(cur_state != state,
309              "DSI PLL state assertion failure (expected %s, current %s)\n",
310                         onoff(state), onoff(cur_state));
311 }
312
/*
 * Assert that the FDI TX side of @pipe is in the expected @state.
 *
 * DDI platforms have no dedicated FDI_TX register, so the transcoder's
 * DDI function control enable bit is checked instead.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
339
340 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
341                           enum pipe pipe, bool state)
342 {
343         u32 val;
344         bool cur_state;
345
346         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
347         cur_state = !!(val & FDI_RX_ENABLE);
348         I915_STATE_WARN(cur_state != state,
349              "FDI RX state assertion failure (expected %s, current %s)\n",
350                         onoff(state), onoff(cur_state));
351 }
352 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
353 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
354
355 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
356                                       enum pipe pipe)
357 {
358         u32 val;
359
360         /* ILK FDI PLL is always enabled */
361         if (IS_GEN(dev_priv, 5))
362                 return;
363
364         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
365         if (HAS_DDI(dev_priv))
366                 return;
367
368         val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
369         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
370 }
371
372 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
373                        enum pipe pipe, bool state)
374 {
375         u32 val;
376         bool cur_state;
377
378         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
379         cur_state = !!(val & FDI_RX_PLL_ENABLE);
380         I915_STATE_WARN(cur_state != state,
381              "FDI RX PLL assertion failure (expected %s, current %s)\n",
382                         onoff(state), onoff(cur_state));
383 }
384
/*
 * Assert that the panel power sequencer driving @pipe's panel is either
 * off or has its registers unlocked, i.e. that it is safe to touch them.
 * Not applicable on DDI platforms (hence the WARN and early return).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the selected panel port to the pipe feeding it */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Pre-PCH-split hardware here drives the panel via LVDS */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Panel off, or unlock bits set in PP_CONTROL => not locked */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
442
/*
 * Assert that @cpu_transcoder is in the expected enabled/disabled @state.
 *
 * If the transcoder's power domain is not energized, PIPECONF cannot be
 * read and the transcoder is reported as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power domain off: the transcoder cannot be enabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
470
/* Assert that @plane's hardware enable state matches the expected @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
485
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
494
495 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
496                                     enum pipe pipe)
497 {
498         u32 val;
499         bool enabled;
500
501         val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
502         enabled = !!(val & TRANS_ENABLE);
503         I915_STATE_WARN(enabled,
504              "transcoder assertion failed, should be off on pipe %c but is still active\n",
505              pipe_name(pipe));
506 }
507
/*
 * Assert that the PCH DP port behind @dp_reg is not driving @pipe.
 * On IBX, additionally warn if a disabled port is still left pointing
 * at transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
525
/*
 * Assert that the PCH HDMI/SDVO port behind @hdmi_reg is not driving
 * @pipe. On IBX, additionally warn if a disabled port is still left
 * pointing at transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
543
/*
 * Assert that no PCH port (DP, VGA/CRT, LVDS, HDMI/SDVO) is currently
 * driving @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
568
/*
 * Wait for the PHY to report @dig_port as ready, warning on timeout.
 *
 * The ready bits live in different registers depending on the port:
 * ports B/C in DPLL(0) (port C's bits 4 positions higher, hence the
 * shift of @expected_mask), port D in DPIO_PHY_STATUS. Any other port
 * is a programming error (BUG).
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 positions above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
602
/*
 * Enable the PCH transcoder for @crtc on an ILK-style (IBX/CPT) PCH.
 *
 * The shared DPLL and both the FDI TX and RX sides must already be
 * enabled. Interlace mode (and, on IBX, BPC) is copied from the CPU
 * pipe's PIPECONF into the PCH transcoder config before enabling it,
 * and the enable is confirmed by waiting for TRANS_STATE_ENABLE.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Propagate the CPU pipe's interlace mode to the PCH transcoder */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
669
/*
 * Enable the single LPT PCH transcoder, which is always fed by FDI on
 * pipe A. Interlace mode is copied from @cpu_transcoder's PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
701
/*
 * Disable the PCH transcoder for @pipe on an ILK-style PCH.
 *
 * FDI and all PCH ports must already be off. Waits for the transcoder
 * to report disabled, then (on CPT) clears the timing-override chicken
 * bit set when the transcoder was enabled.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
732
/*
 * Disable the LPT PCH transcoder, waiting for it to report disabled,
 * then clear the timing-override chicken bit set on enable.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
750
751 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
752 {
753         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
754
755         if (HAS_PCH_LPT(dev_priv))
756                 return PIPE_A;
757         else
758                 return crtc->pipe;
759 }
760
/*
 * Enable the CPU pipe/transcoder for @new_crtc_state.
 *
 * All planes must already be disabled, and whichever PLL feeds the pipe
 * (DPLL or DSI PLL on GMCH platforms; the FDI PLLs when driving the
 * PCH) must be enabled first. When the hardware frame counter cannot be
 * used, waits for the scanline counter to start moving so that vblank
 * timestamps are sane before drm_crtc_vblank_on().
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
818
/*
 * intel_disable_pipe - disable the CPU pipe/transcoder for a crtc
 * @old_crtc_state: the crtc state being torn down
 *
 * Clears PIPECONF_ENABLE (except on 830, where both pipes stay enabled)
 * and waits for the pipe to actually stop. All planes must already be
 * disabled.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Already off - nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	/* Only wait for the pipe to stop if we actually disabled it. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
858
/* GTT tile size in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
863
864 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
865 {
866         if (is_ccs_modifier(fb->modifier))
867                 return is_ccs_plane(fb, plane);
868
869         return plane == 1;
870 }
871
872 bool
873 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
874                                     u64 modifier)
875 {
876         return info->is_yuv &&
877                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
878 }
879
880 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
881                                    int color_plane)
882 {
883         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
884                color_plane == 1;
885 }
886
/*
 * Return the tile width in bytes for @color_plane of @fb, based on the
 * fb's tiling modifier (the full GTT page size for linear surfaces).
 * CCS planes use a narrower tile than the main surface they describe.
 * Note the deliberate fallthrough chains: a non-CCS plane of a CCS
 * modifier uses the underlying Y/Yf tile width.
 */
unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* gen12 CCS planes are 64 byte wide */
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
940
941 unsigned int
942 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
943 {
944         if (is_gen12_ccs_plane(fb, color_plane))
945                 return 1;
946
947         return intel_tile_size(to_i915(fb->dev)) /
948                 intel_tile_width_bytes(fb, color_plane);
949 }
950
951 /* Return the tile dimensions in pixel units */
952 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
953                             unsigned int *tile_width,
954                             unsigned int *tile_height)
955 {
956         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
957         unsigned int cpp = fb->format->cpp[color_plane];
958
959         *tile_width = tile_width_bytes / cpp;
960         *tile_height = intel_tile_height(fb, color_plane);
961 }
962
963 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
964                                         int color_plane)
965 {
966         unsigned int tile_width, tile_height;
967
968         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
969
970         return fb->pitches[color_plane] * tile_height;
971 }
972
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
981
982 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
983 {
984         unsigned int size = 0;
985         int i;
986
987         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
988                 size += rot_info->plane[i].width * rot_info->plane[i].height;
989
990         return size;
991 }
992
993 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
994 {
995         unsigned int size = 0;
996         int i;
997
998         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
999                 size += rem_info->plane[i].width * rem_info->plane[i].height;
1000
1001         return size;
1002 }
1003
1004 static void
1005 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1006                         const struct drm_framebuffer *fb,
1007                         unsigned int rotation)
1008 {
1009         view->type = I915_GGTT_VIEW_NORMAL;
1010         if (drm_rotation_90_or_270(rotation)) {
1011                 view->type = I915_GGTT_VIEW_ROTATED;
1012                 view->rotated = to_intel_framebuffer(fb)->rot_info;
1013         }
1014 }
1015
/* Required GGTT alignment (bytes) for the cursor surface, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
1027
/*
 * Required GGTT alignment (bytes) for linear scanout surfaces.
 * Ordering matters: the gen9+ check must precede the gen4+ fallback.
 */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
1040
/* Async flips are available on gen5 (ILK) and newer. */
static bool has_async_flips(struct drm_i915_private *i915)
{
	return INTEL_GEN(i915) >= 5;
}
1045
/*
 * Return the required GGTT start-address alignment (bytes) for
 * @color_plane of @fb, based on gen and tiling modifier. Note the
 * deliberate fallthrough chains between related Y/Yf/CCS modifiers.
 */
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		/* async flips need a larger X-tiled surface alignment */
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* semiplanar UV planes must be tile-row aligned */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
1084
1085 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1086 {
1087         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1088         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1089
1090         return INTEL_GEN(dev_priv) < 4 ||
1091                 (plane->has_fbc &&
1092                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
1093 }
1094
/*
 * Pin @fb's backing object into the GGTT for scanout using @view, and,
 * when @uses_fence and the vma is map-and-fenceable, attach a fence
 * register (PLANE_HAS_FENCE is then or'ed into *out_flags).
 *
 * Returns the pinned vma with an extra reference held, or an ERR_PTR.
 * The whole operation runs under a runtime-pm wakeref, and
 * pending_fb_pin is raised around it so other code can tell a pin is
 * in flight.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Alignment of plane 0 governs where the surface may be placed. */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Fence is mandatory pre-gen4: unwind the pin. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference returned to the caller; dropped in unpin. */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
1189
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (when
 * PLANE_HAS_FENCE is set in @flags), the display-plane pin, and the
 * extra vma reference, all under the object lock.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj, NULL);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
1200
1201 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
1202                           unsigned int rotation)
1203 {
1204         if (drm_rotation_90_or_270(rotation))
1205                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
1206         else
1207                 return fb->pitches[color_plane];
1208 }
1209
1210 /*
1211  * Convert the x/y offsets into a linear offset.
1212  * Only valid with 0/180 degree rotation, which is fine since linear
1213  * offset is only used with linear buffers on pre-hsw and tiled buffers
1214  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1215  */
1216 u32 intel_fb_xy_to_linear(int x, int y,
1217                           const struct intel_plane_state *state,
1218                           int color_plane)
1219 {
1220         const struct drm_framebuffer *fb = state->hw.fb;
1221         unsigned int cpp = fb->format->cpp[color_plane];
1222         unsigned int pitch = state->color_plane[color_plane].stride;
1223
1224         return y * pitch + x * cpp;
1225 }
1226
1227 /*
1228  * Add the x/y offsets derived from fb->offsets[] to the user
1229  * specified plane src x/y offsets. The resulting x/y offsets
1230  * specify the start of scanout from the beginning of the gtt mapping.
1231  */
1232 void intel_add_fb_offsets(int *x, int *y,
1233                           const struct intel_plane_state *state,
1234                           int color_plane)
1235
1236 {
1237         *x += state->color_plane[color_plane].x;
1238         *y += state->color_plane[color_plane].y;
1239 }
1240
/*
 * Fold the byte distance between two tile-aligned surface offsets
 * (@old_offset -> @new_offset, both multiples of @tile_size, with
 * @new_offset <= @old_offset) into the *x/*y pixel offsets, so the
 * caller can scan out from @new_offset instead. Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	/* Whole tiles between the two offsets. */
	tiles = (old_offset - new_offset) / tile_size;

	/* Split into full tile rows plus a remainder of tile columns. */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
1267
1268 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
1269 {
1270         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
1271                is_gen12_ccs_plane(fb, color_plane);
1272 }
1273
/*
 * Rebase the surface from @old_offset to @new_offset (<= @old_offset)
 * by folding the difference into *x/*y: in whole tiles for tiled
 * layouts, in rows/pixels for linear layouts. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile-height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: convert the byte delta directly to rows + pixels. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
1312
1313 /*
1314  * Adjust the tile offset by moving the difference into
1315  * the x/y offsets.
1316  */
1317 u32 intel_plane_adjust_aligned_offset(int *x, int *y,
1318                                       const struct intel_plane_state *state,
1319                                       int color_plane,
1320                                       u32 old_offset, u32 new_offset)
1321 {
1322         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
1323                                            state->hw.rotation,
1324                                            state->color_plane[color_plane].stride,
1325                                            old_offset, new_offset);
1326 }
1327
1328 /*
1329  * Computes the aligned offset to the base tile and adjusts
1330  * x, y. bytes per pixel is assumed to be a power-of-two.
1331  *
1332  * In the 90/270 rotated case, x and y are assumed
1333  * to be already rotated to match the rotated GTT view, and
1334  * pitch is the tile_height aligned framebuffer height.
1335  *
1336  * This function is used when computing the derived information
1337  * under intel_framebuffer, so using any of that information
1338  * here is not allowed. Anything under drm_framebuffer can be
1339  * used. This is why the user has to pass in the pitch since it
1340  * is specified in the rotated orientation.
1341  */
1342 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
1343                                         int *x, int *y,
1344                                         const struct drm_framebuffer *fb,
1345                                         int color_plane,
1346                                         unsigned int pitch,
1347                                         unsigned int rotation,
1348                                         u32 alignment)
1349 {
1350         unsigned int cpp = fb->format->cpp[color_plane];
1351         u32 offset, offset_aligned;
1352
1353         if (!is_surface_linear(fb, color_plane)) {
1354                 unsigned int tile_size, tile_width, tile_height;
1355                 unsigned int tile_rows, tiles, pitch_tiles;
1356
1357                 tile_size = intel_tile_size(dev_priv);
1358                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
1359
1360                 if (drm_rotation_90_or_270(rotation)) {
1361                         pitch_tiles = pitch / tile_height;
1362                         swap(tile_width, tile_height);
1363                 } else {
1364                         pitch_tiles = pitch / (tile_width * cpp);
1365                 }
1366
1367                 tile_rows = *y / tile_height;
1368                 *y %= tile_height;
1369
1370                 tiles = *x / tile_width;
1371                 *x %= tile_width;
1372
1373                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
1374
1375                 offset_aligned = offset;
1376                 if (alignment)
1377                         offset_aligned = rounddown(offset_aligned, alignment);
1378
1379                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
1380                                          tile_size, pitch_tiles,
1381                                          offset, offset_aligned);
1382         } else {
1383                 offset = *y * pitch + *x * cpp;
1384                 offset_aligned = offset;
1385                 if (alignment) {
1386                         offset_aligned = rounddown(offset_aligned, alignment);
1387                         *y = (offset % alignment) / pitch;
1388                         *x = ((offset % alignment) - *y * pitch) / cpp;
1389                 } else {
1390                         *y = *x = 0;
1391                 }
1392         }
1393
1394         return offset_aligned;
1395 }
1396
1397 u32 intel_plane_compute_aligned_offset(int *x, int *y,
1398                                        const struct intel_plane_state *state,
1399                                        int color_plane)
1400 {
1401         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
1402         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
1403         const struct drm_framebuffer *fb = state->hw.fb;
1404         unsigned int rotation = state->hw.rotation;
1405         int pitch = state->color_plane[color_plane].stride;
1406         u32 alignment;
1407
1408         if (intel_plane->id == PLANE_CURSOR)
1409                 alignment = intel_cursor_alignment(dev_priv);
1410         else
1411                 alignment = intel_surf_alignment(fb, color_plane);
1412
1413         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
1414                                             pitch, rotation, alignment);
1415 }
1416
/*
 * Convert the fb->offset[] into x/y offsets.
 *
 * Validates that the user-supplied plane offset meets the modifier's
 * alignment requirement and that offset + aligned-height * pitch fits
 * in a u32, then folds the offset into *x/*y relative to base 0.
 * Returns 0 on success, -EINVAL/-ERANGE on bad offsets.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/* gen12 semiplanar UV planes must start on a tile row boundary. */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold offsets[color_plane] into x/y relative to offset 0. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
1464
1465 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1466 {
1467         switch (fb_modifier) {
1468         case I915_FORMAT_MOD_X_TILED:
1469                 return I915_TILING_X;
1470         case I915_FORMAT_MOD_Y_TILED:
1471         case I915_FORMAT_MOD_Y_TILED_CCS:
1472         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1473         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1474         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1475                 return I915_TILING_Y;
1476         default:
1477                 return I915_TILING_NONE;
1478         }
1479 }
1480
1481 /*
1482  * From the Sky Lake PRM:
1483  * "The Color Control Surface (CCS) contains the compression status of
1484  *  the cache-line pairs. The compression state of the cache-line pair
1485  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1486  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1487  *  cache-line-pairs. CCS is always Y tiled."
1488  *
1489  * Since cache line pairs refers to horizontally adjacent cache lines,
1490  * each cache line in the CCS corresponds to an area of 32x16 cache
1491  * lines on the main surface. Since each pixel is 4 bytes, this gives
1492  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1493  * main surface.
1494  */
1495 static const struct drm_format_info skl_ccs_formats[] = {
1496         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1497           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1498         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1499           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1500         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1501           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1502         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1503           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1504 };
1505
1506 /*
1507  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1508  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1509  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1510  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1511  * the main surface.
1512  */
1513 static const struct drm_format_info gen12_ccs_formats[] = {
1514         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1515           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1516           .hsub = 1, .vsub = 1, },
1517         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1518           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1519           .hsub = 1, .vsub = 1, },
1520         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1521           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1522           .hsub = 1, .vsub = 1, .has_alpha = true },
1523         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1524           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1525           .hsub = 1, .vsub = 1, .has_alpha = true },
1526         { .format = DRM_FORMAT_YUYV, .num_planes = 2,
1527           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1528           .hsub = 2, .vsub = 1, .is_yuv = true },
1529         { .format = DRM_FORMAT_YVYU, .num_planes = 2,
1530           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1531           .hsub = 2, .vsub = 1, .is_yuv = true },
1532         { .format = DRM_FORMAT_UYVY, .num_planes = 2,
1533           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1534           .hsub = 2, .vsub = 1, .is_yuv = true },
1535         { .format = DRM_FORMAT_VYUY, .num_planes = 2,
1536           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1537           .hsub = 2, .vsub = 1, .is_yuv = true },
1538         { .format = DRM_FORMAT_NV12, .num_planes = 4,
1539           .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1540           .hsub = 2, .vsub = 2, .is_yuv = true },
1541         { .format = DRM_FORMAT_P010, .num_planes = 4,
1542           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1543           .hsub = 2, .vsub = 2, .is_yuv = true },
1544         { .format = DRM_FORMAT_P012, .num_planes = 4,
1545           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1546           .hsub = 2, .vsub = 2, .is_yuv = true },
1547         { .format = DRM_FORMAT_P016, .num_planes = 4,
1548           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1549           .hsub = 2, .vsub = 2, .is_yuv = true },
1550 };
1551
/*
 * Same as gen12_ccs_formats[] above, but with additional surface used
 * to pass Clear Color information in plane 2 with 64 bits of data.
 * Plane 2 has .char_per_block = 0 since it is consumed by the driver
 * and never programmed into the display engine.
 */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
1570
1571 static const struct drm_format_info *
1572 lookup_format_info(const struct drm_format_info formats[],
1573                    int num_formats, u32 format)
1574 {
1575         int i;
1576
1577         for (i = 0; i < num_formats; i++) {
1578                 if (formats[i].format == format)
1579                         return &formats[i];
1580         }
1581
1582         return NULL;
1583 }
1584
1585 static const struct drm_format_info *
1586 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1587 {
1588         switch (cmd->modifier[0]) {
1589         case I915_FORMAT_MOD_Y_TILED_CCS:
1590         case I915_FORMAT_MOD_Yf_TILED_CCS:
1591                 return lookup_format_info(skl_ccs_formats,
1592                                           ARRAY_SIZE(skl_ccs_formats),
1593                                           cmd->pixel_format);
1594         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1595         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1596                 return lookup_format_info(gen12_ccs_formats,
1597                                           ARRAY_SIZE(gen12_ccs_formats),
1598                                           cmd->pixel_format);
1599         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1600                 return lookup_format_info(gen12_ccs_cc_formats,
1601                                           ARRAY_SIZE(gen12_ccs_cc_formats),
1602                                           cmd->pixel_format);
1603         default:
1604                 return NULL;
1605         }
1606 }
1607
1608 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1609 {
1610         return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1611                             512) * 64;
1612 }
1613
1614 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1615                               u32 pixel_format, u64 modifier)
1616 {
1617         struct intel_crtc *crtc;
1618         struct intel_plane *plane;
1619
1620         /*
1621          * We assume the primary plane for pipe A has
1622          * the highest stride limits of them all,
1623          * if in case pipe A is disabled, use the first pipe from pipe_mask.
1624          */
1625         crtc = intel_get_first_crtc(dev_priv);
1626         if (!crtc)
1627                 return 0;
1628
1629         plane = to_intel_plane(crtc->base.primary);
1630
1631         return plane->max_stride(plane, pixel_format, modifier,
1632                                  DRM_MODE_ROTATE_0);
1633 }
1634
1635 static
1636 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1637                         u32 pixel_format, u64 modifier)
1638 {
1639         /*
1640          * Arbitrary limit for gen4+ chosen to match the
1641          * render engine max stride.
1642          *
1643          * The new CCS hash mode makes remapping impossible
1644          */
1645         if (!is_ccs_modifier(modifier)) {
1646                 if (INTEL_GEN(dev_priv) >= 7)
1647                         return 256*1024;
1648                 else if (INTEL_GEN(dev_priv) >= 4)
1649                         return 128*1024;
1650         }
1651
1652         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1653 }
1654
/*
 * Return the required stride alignment (in bytes) for @color_plane of @fb,
 * taking linear-vs-tiled layout and CCS workarounds into account.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
1699
1700 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
1701 {
1702         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1703         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1704         const struct drm_framebuffer *fb = plane_state->hw.fb;
1705         int i;
1706
1707         /* We don't want to deal with remapping with cursors */
1708         if (plane->id == PLANE_CURSOR)
1709                 return false;
1710
1711         /*
1712          * The display engine limits already match/exceed the
1713          * render engine limits, so not much point in remapping.
1714          * Would also need to deal with the fence POT alignment
1715          * and gen2 2KiB GTT tile size.
1716          */
1717         if (INTEL_GEN(dev_priv) < 4)
1718                 return false;
1719
1720         /*
1721          * The new CCS hash mode isn't compatible with remapping as
1722          * the virtual address of the pages affects the compressed data.
1723          */
1724         if (is_ccs_modifier(fb->modifier))
1725                 return false;
1726
1727         /* Linear needs a page aligned stride for remapping */
1728         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
1729                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
1730
1731                 for (i = 0; i < fb->format->num_planes; i++) {
1732                         if (fb->pitches[i] & alignment)
1733                                 return false;
1734                 }
1735         }
1736
1737         return true;
1738 }
1739
1740 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
1741 {
1742         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1743         const struct drm_framebuffer *fb = plane_state->hw.fb;
1744         unsigned int rotation = plane_state->hw.rotation;
1745         u32 stride, max_stride;
1746
1747         /*
1748          * No remapping for invisible planes since we don't have
1749          * an actual source viewport to remap.
1750          */
1751         if (!plane_state->uapi.visible)
1752                 return false;
1753
1754         if (!intel_plane_can_remap(plane_state))
1755                 return false;
1756
1757         /*
1758          * FIXME: aux plane limits on gen9+ are
1759          * unclear in Bspec, for now no checking.
1760          */
1761         stride = intel_fb_pitch(fb, 0, rotation);
1762         max_stride = plane->max_stride(plane, fb->format->format,
1763                                        fb->modifier, rotation);
1764
1765         return stride > max_stride;
1766 }
1767
/*
 * Return in @hsub/@vsub the horizontal/vertical subsampling factors of
 * @color_plane relative to its main surface plane. The main plane itself
 * is never subsampled (1x1); non-CCS planes use the format's own
 * subsampling; gen12 CCS AUX planes derive it from the block dimensions.
 */
void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	main_plane = skl_ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* One CCS AUX row covers 32 main surface rows. */
	*vsub = 32;
}
/*
 * Validate that the intra-tile x/y offset of CCS plane @ccs_plane
 * (scaled up to main surface coordinates) matches the intra-tile offset
 * of its main surface plane. Returns 0 on success or for non-CCS /
 * clear color planes, -EINVAL on a mismatch.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	/* The clear color plane carries no pixel data, nothing to check. */
	if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Convert CCS tile dimensions to main surface coordinates. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		drm_dbg_kms(&i915->drm,
			      "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
1854
1855 static void
1856 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
1857 {
1858         int main_plane = is_ccs_plane(fb, color_plane) ?
1859                          skl_ccs_to_main_plane(fb, color_plane) : 0;
1860         int main_hsub, main_vsub;
1861         int hsub, vsub;
1862
1863         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
1864         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
1865         *w = fb->width / main_hsub / hsub;
1866         *h = fb->height / main_vsub / vsub;
1867 }
1868
/*
 * Setup the rotated view for an FB plane and return the size the GTT mapping
 * requires for this view.
 *
 * @plane: index of the fb plane in the rotation info
 * @plane_info: tile layout (offset/stride/width/height) of the plane
 * @gtt_offset_rotated: tile offset of this plane in the rotated mapping
 * @x, @y: first-pixel offsets in the normal (unrotated) view
 * @width, @height: plane dimensions in pixels
 * @tile_size: tile size in bytes
 * @tile_width, @tile_height: tile dimensions for this plane
 *
 * Returns the number of tiles the plane occupies in the rotated mapping,
 * or 0 if the fb modifier doesn't support 90/270 rotation.
 */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	return plane_info->width * plane_info->height;
}
1929
/*
 * Compute the normal and rotated view layout of every plane of @fb and
 * validate the offsets/strides against the backing object. Fills in
 * intel_fb->normal[], intel_fb->rotated[] and intel_fb->rot_info.
 *
 * Returns 0 on success, a negative error code if the fb layout is
 * malformed or doesn't fit in the backing object.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		/*
		 * Plane 2 of Render Compression with Clear Color fb modifier
		 * is consumed by the driver and not passed to DE. Skip the
		 * arithmetic related to alignment and offset calculation.
		 */
		if (is_gen12_ccs_cc_plane(fb, i)) {
			if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
				continue;
			else
				return -EINVAL;
		}

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				     i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear: size in whole tiles of the used region. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2052
/*
 * Build a remapped (or rotated) GGTT view covering only the plane's src
 * viewport, and recompute the per-color-plane stride and x/y offsets to
 * match that view. Overwrites plane_state->view and color_plane[].
 * Must not be called for CCS fbs (asserted below), as CCS data depends
 * on the virtual addresses of the pages.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are in 16.16 fixed point */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
2163
/*
 * Compute the GGTT view and per-color-plane stride/offset/x/y for the
 * plane, remapping the fb into a smaller view when the stride exceeds
 * the plane's limits. Returns 0 on success or a negative error code if
 * the stride limits can't be satisfied even after remapping.
 */
int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	/* No fb, nothing to map. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	/* Use the precomputed normal/rotated offsets from intel_fill_fb_info(). */
	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
2212
/*
 * Create a GGTT-pinned vma wrapping the stolen memory region the
 * BIOS/GOP programmed as the boot framebuffer, so the fb can be taken
 * over without a modeset. Returns NULL (not an ERR_PTR) on any failure,
 * in which case the caller simply doesn't reuse the BIOS fb.
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand [base, base + size) to GTT minimum alignment boundaries. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the exact GGTT offset the BIOS is already scanning out. */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* A tiled BIOS fb is only usable if we can fence it. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
2281
2282 static bool
2283 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2284                               struct intel_initial_plane_config *plane_config)
2285 {
2286         struct drm_device *dev = crtc->base.dev;
2287         struct drm_i915_private *dev_priv = to_i915(dev);
2288         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2289         struct drm_framebuffer *fb = &plane_config->fb->base;
2290         struct i915_vma *vma;
2291
2292         switch (fb->modifier) {
2293         case DRM_FORMAT_MOD_LINEAR:
2294         case I915_FORMAT_MOD_X_TILED:
2295         case I915_FORMAT_MOD_Y_TILED:
2296                 break;
2297         default:
2298                 drm_dbg(&dev_priv->drm,
2299                         "Unsupported modifier for initial FB: 0x%llx\n",
2300                         fb->modifier);
2301                 return false;
2302         }
2303
2304         vma = initial_plane_vma(dev_priv, plane_config);
2305         if (!vma)
2306                 return false;
2307
2308         mode_cmd.pixel_format = fb->format->format;
2309         mode_cmd.width = fb->width;
2310         mode_cmd.height = fb->height;
2311         mode_cmd.pitches[0] = fb->pitches[0];
2312         mode_cmd.modifier[0] = fb->modifier;
2313         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2314
2315         if (intel_framebuffer_init(to_intel_framebuffer(fb),
2316                                    vma->obj, &mode_cmd)) {
2317                 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
2318                 goto err_vma;
2319         }
2320
2321         plane_config->vma = vma;
2322         return true;
2323
2324 err_vma:
2325         i915_vma_put(vma);
2326         return false;
2327 }
2328
2329 static void
2330 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2331                         struct intel_plane_state *plane_state,
2332                         bool visible)
2333 {
2334         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2335
2336         plane_state->uapi.visible = visible;
2337
2338         if (visible)
2339                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
2340         else
2341                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
2342 }
2343
2344 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
2345 {
2346         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2347         struct drm_plane *plane;
2348
2349         /*
2350          * Active_planes aliases if multiple "primary" or cursor planes
2351          * have been used on the same (or wrong) pipe. plane_mask uses
2352          * unique ids, hence we can use that to reconstruct active_planes.
2353          */
2354         crtc_state->enabled_planes = 0;
2355         crtc_state->active_planes = 0;
2356
2357         drm_for_each_plane_mask(plane, &dev_priv->drm,
2358                                 crtc_state->uapi.plane_mask) {
2359                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
2360                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2361         }
2362 }
2363
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * fixing up the current software state to match. Used when the
 * boot-time (BIOS) hardware state cannot be kept as-is.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        drm_dbg_kms(&dev_priv->drm,
                    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                    plane->base.base.id, plane->base.name,
                    crtc->base.base.id, crtc->base.name);

        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_plane_bitmasks(crtc_state);
        /* Plane no longer contributes to bandwidth/cdclk bookkeeping. */
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;

        /* IPS appears to be tied to the primary plane here — NOTE(review): confirm. */
        if (plane->id == PLANE_PRIMARY)
                hsw_disable_ips(crtc_state);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

        intel_disable_plane(plane, crtc_state);
}
2408
/*
 * Take over the framebuffer the BIOS left enabled on this crtc's
 * primary plane: either wrap it into a new fb object, or reuse the fb
 * of another crtc scanning out from the same GGTT address. If neither
 * works, the plane is disabled instead of being left with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(intel_crtc->base.state);
        struct drm_framebuffer *fb;
        struct i915_vma *vma;

        /* Nothing to take over if the BIOS left no fb on this plane. */
        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                vma = plane_config->vma;
                goto valid_fb;
        }

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc_state(c->state)->uapi.active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GGTT address => the BIOS cloned one fb to both pipes. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->hw.fb;
                        vma = state->vma;
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);
        if (crtc_state->bigjoiner) {
                /* The bigjoiner slave pipe mirrors the master; disable it too. */
                struct intel_crtc *slave =
                        crtc_state->bigjoiner_linked_crtc;
                intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
        }

        return;

valid_fb:
        intel_state->hw.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->hw.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->hw.rotation);

        /* Keep the vma pinned and referenced for the plane state. */
        __i915_vma_pin(vma);
        intel_state->vma = i915_vma_get(vma);
        if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
                if (vma->fence)
                        intel_state->flags |= PLANE_HAS_FENCE;

        /* Full-fb source rect (16.16 fixed point for src coordinates). */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->uapi.src = drm_plane_state_src(plane_state);
        intel_state->uapi.dst = drm_plane_state_dest(plane_state);

        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        drm_framebuffer_get(fb);

        plane_state->crtc = &intel_crtc->base;
        intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
                                          intel_crtc);

        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &to_intel_frontbuffer(fb)->bits);
}
2516
2517 unsigned int
2518 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2519 {
2520         int x = 0, y = 0;
2521
2522         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2523                                           plane_state->color_plane[0].offset, 0);
2524
2525         return y;
2526 }
2527
/*
 * Re-program the display hardware from scratch and then, if @state is
 * non-NULL, commit the duplicated atomic state that was captured
 * earlier (e.g. before a GPU reset). Returns 0 on success or a
 * negative error code from the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
                       struct drm_atomic_state *state,
                       struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, ret;

        intel_modeset_setup_hw_state(dev, ctx);
        intel_vga_redisable(to_i915(dev));

        /* Nothing to restore beyond the freshly set up HW state. */
        if (!state)
                return 0;

        /*
         * We've duplicated the state, pointers to the old state are invalid.
         *
         * Don't attempt to use the old state until we commit the duplicated state.
         */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                /*
                 * Force recalculation even if we restore
                 * current state. With fast modeset this may not result
                 * in a modeset when the state is compatible.
                 */
                crtc_state->mode_changed = true;
        }

        /* ignore any reset values/BIOS leftovers in the WM registers */
        if (!HAS_GMCH(to_i915(dev)))
                to_intel_atomic_state(state)->skip_intermediate_wm = true;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

        /* Locks are already held, so a deadlock here would be a bug. */
        drm_WARN_ON(dev, ret == -EDEADLK);
        return ret;
}
2566
2567 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2568 {
2569         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2570                 intel_has_gpu_reset(&dev_priv->gt));
2571 }
2572
/*
 * Prepare the display for an impending GPU reset. When the reset will
 * clobber the display (or force_reset_modeset_test is set), take the
 * modeset locks, duplicate the current atomic state for later
 * restoration by intel_display_finish_reset(), and disable all crtcs.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
        struct drm_atomic_state *state;
        int ret;

        if (!HAS_DISPLAY(dev_priv))
                return;

        /* reset doesn't touch the display */
        if (!dev_priv->params.force_reset_modeset_test &&
            !gpu_reset_clobbers_display(dev_priv))
                return;

        /* We have a modeset vs reset deadlock, defensively unbreak it. */
        set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
        smp_mb__after_atomic();
        wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

        if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Modeset potentially stuck, unbreaking through wedging\n");
                intel_gt_set_wedged(&dev_priv->gt);
        }

        /*
         * Need mode_config.mutex so that we don't
         * trample ongoing ->detect() and whatnot.
         */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(ctx, 0);
        /* Retry until all modeset locks are acquired without deadlock. */
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, ctx);
                if (ret != -EDEADLK)
                        break;

                drm_modeset_backoff(ctx);
        }
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
        state = drm_atomic_helper_duplicate_state(dev, ctx);
        if (IS_ERR(state)) {
                ret = PTR_ERR(state);
                drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
                        ret);
                return;
        }

        ret = drm_atomic_helper_disable_all(dev, ctx);
        if (ret) {
                drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
                        ret);
                drm_atomic_state_put(state);
                return;
        }

        /* Stash the duplicated state for intel_display_finish_reset(). */
        dev_priv->modeset_restore_state = state;
        state->acquire_ctx = ctx;
}
2635
/*
 * Counterpart to intel_display_prepare_reset(): restore the atomic
 * state saved before the GPU reset (re-initializing the display
 * hardware first if the reset clobbered it) and drop the modeset
 * locks taken in prepare.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
        struct drm_atomic_state *state;
        int ret;

        if (!HAS_DISPLAY(dev_priv))
                return;

        /* reset doesn't touch the display */
        if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                return;

        state = fetch_and_zero(&dev_priv->modeset_restore_state);
        if (!state)
                goto unlock;

        /* reset doesn't touch the display */
        if (!gpu_reset_clobbers_display(dev_priv)) {
                /* for testing only restore the display */
                ret = __intel_display_resume(dev, state, ctx);
                if (ret)
                        drm_err(&dev_priv->drm,
                                "Restoring old state failed with %i\n", ret);
        } else {
                /*
                 * The display has been reset as well,
                 * so need a full re-initialization.
                 */
                intel_pps_unlock_regs_wa(dev_priv);
                intel_modeset_init_hw(dev_priv);
                intel_init_clock_gating(dev_priv);
                intel_hpd_init(dev_priv);

                ret = __intel_display_resume(dev, state, ctx);
                if (ret)
                        drm_err(&dev_priv->drm,
                                "Restoring old state failed with %i\n", ret);

                intel_hpd_poll_disable(dev_priv);
        }

        drm_atomic_state_put(state);
unlock:
        drm_modeset_drop_locks(ctx);
        drm_modeset_acquire_fini(ctx);
        mutex_unlock(&dev->mode_config.mutex);

        /* Pairs with the set_bit() in intel_display_prepare_reset(). */
        clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
2687
/*
 * Apply the ICL pipe chicken-bit workarounds (WA #1153 and
 * WA #1605353570) via a read-modify-write of PIPE_CHICKEN.
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 tmp;

        tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

        /*
         * Display WA #1153: icl
         * enable hardware to bypass the alpha math
         * and rounding for per-pixel values 00 and 0xff
         */
        tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
        /*
         * Display WA # 1605353570: icl
         * Set the pixel rounding bit to 1 for allowing
         * passthrough of Frame buffer pixels unmodified
         * across pipe
         */
        tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
        intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
2711
2712 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2713 {
2714         struct drm_crtc *crtc;
2715         bool cleanup_done;
2716
2717         drm_for_each_crtc(crtc, &dev_priv->drm) {
2718                 struct drm_crtc_commit *commit;
2719                 spin_lock(&crtc->commit_lock);
2720                 commit = list_first_entry_or_null(&crtc->commit_list,
2721                                                   struct drm_crtc_commit, commit_entry);
2722                 cleanup_done = commit ?
2723                         try_wait_for_completion(&commit->cleanup_done) : true;
2724                 spin_unlock(&crtc->commit_lock);
2725
2726                 if (cleanup_done)
2727                         continue;
2728
2729                 drm_crtc_wait_one_vblank(crtc);
2730
2731                 return true;
2732         }
2733
2734         return false;
2735 }
2736
/*
 * Gate the pixel clock and disable the iCLKIP SSC modulator via the
 * sideband interface.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
        u32 temp;

        intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp |= SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
2751
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int clock = crtc_state->hw.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;

        lpt_disable_iclkip(dev_priv);

        /* The iCLK virtual clock root frequency is in MHz,
         * but the adjusted_mode->crtc_clock is in KHz. To get the
         * divisors, it is necessary to divide one by another, so we
         * convert the virtual clock precision to KHz here for higher
         * precision.
         */
        for (auxdiv = 0; auxdiv < 2; auxdiv++) {
                u32 iclk_virtual_root_freq = 172800 * 1000;
                u32 iclk_pi_range = 64;
                u32 desired_divisor;

                desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                                    clock << auxdiv);
                divsel = (desired_divisor / iclk_pi_range) - 2;
                phaseinc = desired_divisor % iclk_pi_range;

                /*
                 * Near 20MHz is a corner case which is
                 * out of range for the 7-bit divisor
                 */
                if (divsel <= 0x7f)
                        break;
        }

        /* This should not happen with any sane values */
        drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
                    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
        drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
                    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

        drm_dbg_kms(&dev_priv->drm,
                    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
                    clock, auxdiv, divsel, phasedir, phaseinc);

        mutex_lock(&dev_priv->sb_lock);

        /* Program SSCDIVINTPHASE6 */
        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

        /* Program SSCAUXDIV */
        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
        intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

        /* Enable modulator and associated divider */
        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);

        /* Wait for initialization time */
        udelay(24);

        intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
2827
/*
 * Read back the iCLKIP divider configuration and convert it to a
 * pixel clock in KHz (the inverse of lpt_program_iclkip()'s divisor
 * calculation). Returns 0 if the pixel clock is gated or the SSC
 * modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
        u32 divsel, phaseinc, auxdiv;
        u32 iclk_virtual_root_freq = 172800 * 1000;
        u32 iclk_pi_range = 64;
        u32 desired_divisor;
        u32 temp;

        if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
                return 0;

        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        if (temp & SBI_SSCCTL_DISABLE) {
                mutex_unlock(&dev_priv->sb_lock);
                return 0;
        }

        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
                SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
        phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
                SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
                SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

        mutex_unlock(&dev_priv->sb_lock);

        /* Reverse of divsel/phaseinc derivation in lpt_program_iclkip(). */
        desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

        return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                 desired_divisor << auxdiv);
}
2864
/*
 * Copy the CPU transcoder's h/v timing registers into the given PCH
 * transcoder so both run with identical timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                           enum pipe pch_transcoder)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
                       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
                       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
                       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

        intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
                       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
                       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
                       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
                       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
2888
/*
 * Set or clear the FDI B/C lane bifurcation select in SOUTH_CHICKEN1.
 * The WARNs check that FDI RX on pipes B and C is disabled, since the
 * bit must not be flipped while those links are active.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
        u32 temp;

        temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
        /* Already in the requested state — nothing to do. */
        if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
                return;

        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
                    FDI_RX_ENABLE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
                    FDI_RX_ENABLE);

        temp &= ~FDI_BC_BIFURCATION_SELECT;
        if (enable)
                temp |= FDI_BC_BIFURCATION_SELECT;

        drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
                    enable ? "en" : "dis");
        intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
        intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
2913
2914 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2915 {
2916         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2917         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2918
2919         switch (crtc->pipe) {
2920         case PIPE_A:
2921                 break;
2922         case PIPE_B:
2923                 if (crtc_state->fdi_lanes > 2)
2924                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
2925                 else
2926                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
2927
2928                 break;
2929         case PIPE_C:
2930                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2931
2932                 break;
2933         default:
2934                 BUG();
2935         }
2936 }
2937
2938 /*
2939  * Finds the encoder associated with the given CRTC. This can only be
2940  * used when we know that the CRTC isn't feeding multiple encoders!
2941  */
2942 struct intel_encoder *
2943 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2944                            const struct intel_crtc_state *crtc_state)
2945 {
2946         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2947         const struct drm_connector_state *connector_state;
2948         const struct drm_connector *connector;
2949         struct intel_encoder *encoder = NULL;
2950         int num_encoders = 0;
2951         int i;
2952
2953         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2954                 if (connector_state->crtc != &crtc->base)
2955                         continue;
2956
2957                 encoder = to_intel_encoder(connector_state->best_encoder);
2958                 num_encoders++;
2959         }
2960
2961         drm_WARN(encoder->base.dev, num_encoders != 1,
2962                  "%d encoders for pipe %c\n",
2963                  num_encoders, pipe_name(crtc->pipe));
2964
2965         return encoder;
2966 }
2967
2968 /*
2969  * Enable PCH resources required for PCH ports:
2970  *   - PCH PLLs
2971  *   - FDI training & RX/TX
2972  *   - update transcoder timings
2973  *   - DP transcoding bits
2974  *   - transcoder
2975  */
2976 static void ilk_pch_enable(const struct intel_atomic_state *state,
2977                            const struct intel_crtc_state *crtc_state)
2978 {
2979         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2980         struct drm_device *dev = crtc->base.dev;
2981         struct drm_i915_private *dev_priv = to_i915(dev);
2982         enum pipe pipe = crtc->pipe;
2983         u32 temp;
2984
2985         assert_pch_transcoder_disabled(dev_priv, pipe);
2986
2987         if (IS_IVYBRIDGE(dev_priv))
2988                 ivb_update_fdi_bc_bifurcation(crtc_state);
2989
2990         /* Write the TU size bits before fdi link training, so that error
2991          * detection works. */
2992         intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2993                        intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2994
2995         /* For PCH output, training FDI link */
2996         dev_priv->display.fdi_link_train(crtc, crtc_state);
2997
2998         /* We need to program the right clock selection before writing the pixel
2999          * mutliplier into the DPLL. */
3000         if (HAS_PCH_CPT(dev_priv)) {
3001                 u32 sel;
3002
3003                 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3004                 temp |= TRANS_DPLL_ENABLE(pipe);
3005                 sel = TRANS_DPLLB_SEL(pipe);
3006                 if (crtc_state->shared_dpll ==
3007                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
3008                         temp |= sel;
3009                 else
3010                         temp &= ~sel;
3011                 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3012         }
3013
3014         /* XXX: pch pll's can be enabled any time before we enable the PCH
3015          * transcoder, and we actually should do this to not upset any PCH
3016          * transcoder that already use the clock when we share it.
3017          *
3018          * Note that enable_shared_dpll tries to do the right thing, but
3019          * get_shared_dpll unconditionally resets the pll - we need that to have
3020          * the right LVDS enable sequence. */
3021         intel_enable_shared_dpll(crtc_state);
3022
3023         /* set transcoder timing, panel must allow it */
3024         assert_panel_unlocked(dev_priv, pipe);
3025         ilk_pch_transcoder_set_timings(crtc_state, pipe);
3026
3027         intel_fdi_normal_train(crtc);
3028
3029         /* For PCH DP, enable TRANS_DP_CTL */
3030         if (HAS_PCH_CPT(dev_priv) &&
3031             intel_crtc_has_dp_encoder(crtc_state)) {
3032                 const struct drm_display_mode *adjusted_mode =
3033                         &crtc_state->hw.adjusted_mode;
3034                 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3035                 i915_reg_t reg = TRANS_DP_CTL(pipe);
3036                 enum port port;
3037
3038                 temp = intel_de_read(dev_priv, reg);
3039                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3040                           TRANS_DP_SYNC_MASK |
3041                           TRANS_DP_BPC_MASK);
3042                 temp |= TRANS_DP_OUTPUT_ENABLE;
3043                 temp |= bpc << 9; /* same format but at 11:9 */
3044
3045                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
3046                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3047                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
3048                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3049
3050                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
3051                 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
3052                 temp |= TRANS_DP_PORT_SEL(port);
3053
3054                 intel_de_write(dev_priv, reg, temp);
3055         }
3056
3057         ilk_enable_pch_transcoder(crtc_state);
3058 }
3059
3060 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
3061 {
3062         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3063         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3064         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3065
3066         assert_pch_transcoder_disabled(dev_priv, PIPE_A);
3067
3068         lpt_program_iclkip(crtc_state);
3069
3070         /* Set transcoder timing. */
3071         ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
3072
3073         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3074 }
3075
3076 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
3077                                enum pipe pipe)
3078 {
3079         i915_reg_t dslreg = PIPEDSL(pipe);
3080         u32 temp;
3081
3082         temp = intel_de_read(dev_priv, dslreg);
3083         udelay(500);
3084         if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
3085                 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
3086                         drm_err(&dev_priv->drm,
3087                                 "mode set failed: pipe %c stuck\n",
3088                                 pipe_name(pipe));
3089         }
3090 }
3091
3092 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
3093 {
3094         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3095         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3096         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
3097         enum pipe pipe = crtc->pipe;
3098         int width = drm_rect_width(dst);
3099         int height = drm_rect_height(dst);
3100         int x = dst->x1;
3101         int y = dst->y1;
3102
3103         if (!crtc_state->pch_pfit.enabled)
3104                 return;
3105
3106         /* Force use of hard-coded filter coefficients
3107          * as some pre-programmed values are broken,
3108          * e.g. x201.
3109          */
3110         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3111                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3112                                PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
3113         else
3114                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3115                                PF_FILTER_MED_3x3);
3116         intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
3117         intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
3118 }
3119
/*
 * Enable IPS for the pipe described by @crtc_state; no-op unless the
 * state has IPS enabled. Broadwell enables IPS through the pcode
 * mailbox, Haswell by writing IPS_CTL directly.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
3156
/*
 * Disable IPS for the pipe described by @crtc_state; no-op unless the
 * state has IPS enabled. Always ends with a vblank wait so planes can
 * be disabled safely afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		/* Haswell: direct register write, flushed with a posting read. */
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
3185
3186 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
3187 {
3188         if (intel_crtc->overlay)
3189                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3190
3191         /* Let userspace switch the overlay on again. In most cases userspace
3192          * has to recompute where to put it anyway.
3193          */
3194 }
3195
3196 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
3197                                        const struct intel_crtc_state *new_crtc_state)
3198 {
3199         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3200         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3201
3202         if (!old_crtc_state->ips_enabled)
3203                 return false;
3204
3205         if (intel_crtc_needs_modeset(new_crtc_state))
3206                 return true;
3207
3208         /*
3209          * Workaround : Do not read or write the pipe palette/gamma data while
3210          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3211          *
3212          * Disable IPS before we program the LUT.
3213          */
3214         if (IS_HASWELL(dev_priv) &&
3215             (new_crtc_state->uapi.color_mgmt_changed ||
3216              new_crtc_state->update_pipe) &&
3217             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3218                 return true;
3219
3220         return !new_crtc_state->ips_enabled;
3221 }
3222
3223 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
3224                                        const struct intel_crtc_state *new_crtc_state)
3225 {
3226         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3227         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3228
3229         if (!new_crtc_state->ips_enabled)
3230                 return false;
3231
3232         if (intel_crtc_needs_modeset(new_crtc_state))
3233                 return true;
3234
3235         /*
3236          * Workaround : Do not read or write the pipe palette/gamma data while
3237          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3238          *
3239          * Re-enable IPS after the LUT has been programmed.
3240          */
3241         if (IS_HASWELL(dev_priv) &&
3242             (new_crtc_state->uapi.color_mgmt_changed ||
3243              new_crtc_state->update_pipe) &&
3244             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3245                 return true;
3246
3247         /*
3248          * We can't read out IPS on broadwell, assume the worst and
3249          * forcibly enable IPS on the first fastset.
3250          */
3251         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
3252                 return true;
3253
3254         return !old_crtc_state->ips_enabled;
3255 }
3256
3257 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
3258 {
3259         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3260
3261         if (!crtc_state->nv12_planes)
3262                 return false;
3263
3264         /* WA Display #0827: Gen9:all */
3265         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
3266                 return true;
3267
3268         return false;
3269 }
3270
3271 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
3272 {
3273         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3274
3275         /* Wa_2006604312:icl,ehl */
3276         if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
3277                 return true;
3278
3279         return false;
3280 }
3281
3282 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
3283                             const struct intel_crtc_state *new_crtc_state)
3284 {
3285         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
3286                 new_crtc_state->active_planes;
3287 }
3288
3289 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
3290                              const struct intel_crtc_state *new_crtc_state)
3291 {
3292         return old_crtc_state->active_planes &&
3293                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
3294 }
3295
/*
 * Per-CRTC work run after the planes of @crtc have been updated for
 * this commit: flush frontbuffer tracking, update watermarks,
 * re-enable IPS/FBC and drop display workarounds no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA #0827: disable once no longer needed */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl: disable once no longer needed */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
3324
3325 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
3326                                         struct intel_crtc *crtc)
3327 {
3328         const struct intel_crtc_state *crtc_state =
3329                 intel_atomic_get_new_crtc_state(state, crtc);
3330         u8 update_planes = crtc_state->update_planes;
3331         const struct intel_plane_state *plane_state;
3332         struct intel_plane *plane;
3333         int i;
3334
3335         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
3336                 if (plane->enable_flip_done &&
3337                     plane->pipe == crtc->pipe &&
3338                     update_planes & BIT(plane->id))
3339                         plane->enable_flip_done(plane);
3340         }
3341 }
3342
3343 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
3344                                          struct intel_crtc *crtc)
3345 {
3346         const struct intel_crtc_state *crtc_state =
3347                 intel_atomic_get_new_crtc_state(state, crtc);
3348         u8 update_planes = crtc_state->update_planes;
3349         const struct intel_plane_state *plane_state;
3350         struct intel_plane *plane;
3351         int i;
3352
3353         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
3354                 if (plane->disable_flip_done &&
3355                     plane->pipe == crtc->pipe &&
3356                     update_planes & BIT(plane->id))
3357                         plane->disable_flip_done(plane);
3358         }
3359 }
3360
3361 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
3362                                              struct intel_crtc *crtc)
3363 {
3364         struct drm_i915_private *i915 = to_i915(state->base.dev);
3365         const struct intel_crtc_state *old_crtc_state =
3366                 intel_atomic_get_old_crtc_state(state, crtc);
3367         const struct intel_crtc_state *new_crtc_state =
3368                 intel_atomic_get_new_crtc_state(state, crtc);
3369         u8 update_planes = new_crtc_state->update_planes;
3370         const struct intel_plane_state *old_plane_state;
3371         struct intel_plane *plane;
3372         bool need_vbl_wait = false;
3373         int i;
3374
3375         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3376                 if (plane->need_async_flip_disable_wa &&
3377                     plane->pipe == crtc->pipe &&
3378                     update_planes & BIT(plane->id)) {
3379                         /*
3380                          * Apart from the async flip bit we want to
3381                          * preserve the old state for the plane.
3382                          */
3383                         plane->async_flip(plane, old_crtc_state,
3384                                           old_plane_state, false);
3385                         need_vbl_wait = true;
3386                 }
3387         }
3388
3389         if (need_vbl_wait)
3390                 intel_wait_for_vblank(i915, crtc->pipe);
3391 }
3392
/*
 * Per-CRTC preparation that must run before the planes of @crtc are
 * (re)programmed for this commit: tear down IPS, arm display
 * workarounds, program intermediate watermarks and quiet underrun
 * reporting where needed. Statement order here is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank wait before planes change. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
3486
3487 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
3488                                       struct intel_crtc *crtc)
3489 {
3490         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3491         const struct intel_crtc_state *new_crtc_state =
3492                 intel_atomic_get_new_crtc_state(state, crtc);
3493         unsigned int update_mask = new_crtc_state->update_planes;
3494         const struct intel_plane_state *old_plane_state;
3495         struct intel_plane *plane;
3496         unsigned fb_bits = 0;
3497         int i;
3498
3499         intel_crtc_dpms_overlay_disable(crtc);
3500
3501         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3502                 if (crtc->pipe != plane->pipe ||
3503                     !(update_mask & BIT(plane->id)))
3504                         continue;
3505
3506                 intel_disable_plane(plane, new_crtc_state);
3507
3508                 if (old_plane_state->uapi.visible)
3509                         fb_bits |= plane->frontbuffer_bit;
3510         }
3511
3512         intel_frontbuffer_flip(dev_priv, fb_bits);
3513 }
3514
/*
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	/* Non-MST connectors are expected to have an attached encoder. */
	encoder = intel_attached_encoder(connector);
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}
3537
3538 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
3539 {
3540         struct drm_connector_state *new_conn_state;
3541         struct drm_connector *connector;
3542         int i;
3543
3544         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3545                                         i) {
3546                 struct intel_connector *intel_connector;
3547                 struct intel_encoder *encoder;
3548                 struct intel_crtc *crtc;
3549
3550                 if (!intel_connector_needs_modeset(state, connector))
3551                         continue;
3552
3553                 intel_connector = to_intel_connector(connector);
3554                 encoder = intel_connector_primary_encoder(intel_connector);
3555                 if (!encoder->update_prepare)
3556                         continue;
3557
3558                 crtc = new_conn_state->crtc ?
3559                         to_intel_crtc(new_conn_state->crtc) : NULL;
3560                 encoder->update_prepare(state, encoder, crtc);
3561         }
3562 }
3563
3564 static void intel_encoders_update_complete(struct intel_atomic_state *state)
3565 {
3566         struct drm_connector_state *new_conn_state;
3567         struct drm_connector *connector;
3568         int i;
3569
3570         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3571                                         i) {
3572                 struct intel_connector *intel_connector;
3573                 struct intel_encoder *encoder;
3574                 struct intel_crtc *crtc;
3575
3576                 if (!intel_connector_needs_modeset(state, connector))
3577                         continue;
3578
3579                 intel_connector = to_intel_connector(connector);
3580                 encoder = intel_connector_primary_encoder(intel_connector);
3581                 if (!encoder->update_complete)
3582                         continue;
3583
3584                 crtc = new_conn_state->crtc ?
3585                         to_intel_crtc(new_conn_state->crtc) : NULL;
3586                 encoder->update_complete(state, encoder, crtc);
3587         }
3588 }
3589
3590 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
3591                                           struct intel_crtc *crtc)
3592 {
3593         const struct intel_crtc_state *crtc_state =
3594                 intel_atomic_get_new_crtc_state(state, crtc);
3595         const struct drm_connector_state *conn_state;
3596         struct drm_connector *conn;
3597         int i;
3598
3599         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3600                 struct intel_encoder *encoder =
3601                         to_intel_encoder(conn_state->best_encoder);
3602
3603                 if (conn_state->crtc != &crtc->base)
3604                         continue;
3605
3606                 if (encoder->pre_pll_enable)
3607                         encoder->pre_pll_enable(state, encoder,
3608                                                 crtc_state, conn_state);
3609         }
3610 }
3611
3612 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
3613                                       struct intel_crtc *crtc)
3614 {
3615         const struct intel_crtc_state *crtc_state =
3616                 intel_atomic_get_new_crtc_state(state, crtc);
3617         const struct drm_connector_state *conn_state;
3618         struct drm_connector *conn;
3619         int i;
3620
3621         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3622                 struct intel_encoder *encoder =
3623                         to_intel_encoder(conn_state->best_encoder);
3624
3625                 if (conn_state->crtc != &crtc->base)
3626                         continue;
3627
3628                 if (encoder->pre_enable)
3629                         encoder->pre_enable(state, encoder,
3630                                             crtc_state, conn_state);
3631         }
3632 }
3633
3634 static void intel_encoders_enable(struct intel_atomic_state *state,
3635                                   struct intel_crtc *crtc)
3636 {
3637         const struct intel_crtc_state *crtc_state =
3638                 intel_atomic_get_new_crtc_state(state, crtc);
3639         const struct drm_connector_state *conn_state;
3640         struct drm_connector *conn;
3641         int i;
3642
3643         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3644                 struct intel_encoder *encoder =
3645                         to_intel_encoder(conn_state->best_encoder);
3646
3647                 if (conn_state->crtc != &crtc->base)
3648                         continue;
3649
3650                 if (encoder->enable)
3651                         encoder->enable(state, encoder,
3652                                         crtc_state, conn_state);
3653                 intel_opregion_notify_encoder(encoder, true);
3654         }
3655 }
3656
3657 static void intel_encoders_disable(struct intel_atomic_state *state,
3658                                    struct intel_crtc *crtc)
3659 {
3660         const struct intel_crtc_state *old_crtc_state =
3661                 intel_atomic_get_old_crtc_state(state, crtc);
3662         const struct drm_connector_state *old_conn_state;
3663         struct drm_connector *conn;
3664         int i;
3665
3666         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3667                 struct intel_encoder *encoder =
3668                         to_intel_encoder(old_conn_state->best_encoder);
3669
3670                 if (old_conn_state->crtc != &crtc->base)
3671                         continue;
3672
3673                 intel_opregion_notify_encoder(encoder, false);
3674                 if (encoder->disable)
3675                         encoder->disable(state, encoder,
3676                                          old_crtc_state, old_conn_state);
3677         }
3678 }
3679
3680 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3681                                         struct intel_crtc *crtc)
3682 {
3683         const struct intel_crtc_state *old_crtc_state =
3684                 intel_atomic_get_old_crtc_state(state, crtc);
3685         const struct drm_connector_state *old_conn_state;
3686         struct drm_connector *conn;
3687         int i;
3688
3689         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3690                 struct intel_encoder *encoder =
3691                         to_intel_encoder(old_conn_state->best_encoder);
3692
3693                 if (old_conn_state->crtc != &crtc->base)
3694                         continue;
3695
3696                 if (encoder->post_disable)
3697                         encoder->post_disable(state, encoder,
3698                                               old_crtc_state, old_conn_state);
3699         }
3700 }
3701
3702 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3703                                             struct intel_crtc *crtc)
3704 {
3705         const struct intel_crtc_state *old_crtc_state =
3706                 intel_atomic_get_old_crtc_state(state, crtc);
3707         const struct drm_connector_state *old_conn_state;
3708         struct drm_connector *conn;
3709         int i;
3710
3711         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3712                 struct intel_encoder *encoder =
3713                         to_intel_encoder(old_conn_state->best_encoder);
3714
3715                 if (old_conn_state->crtc != &crtc->base)
3716                         continue;
3717
3718                 if (encoder->post_pll_disable)
3719                         encoder->post_pll_disable(state, encoder,
3720                                                   old_crtc_state, old_conn_state);
3721         }
3722 }
3723
3724 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3725                                        struct intel_crtc *crtc)
3726 {
3727         const struct intel_crtc_state *crtc_state =
3728                 intel_atomic_get_new_crtc_state(state, crtc);
3729         const struct drm_connector_state *conn_state;
3730         struct drm_connector *conn;
3731         int i;
3732
3733         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3734                 struct intel_encoder *encoder =
3735                         to_intel_encoder(conn_state->best_encoder);
3736
3737                 if (conn_state->crtc != &crtc->base)
3738                         continue;
3739
3740                 if (encoder->update_pipe)
3741                         encoder->update_pipe(state, encoder,
3742                                              crtc_state, conn_state);
3743         }
3744 }
3745
3746 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3747 {
3748         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3749         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3750
3751         plane->disable_plane(plane, crtc_state);
3752 }
3753
/*
 * Full modeset enable for ILK-style (PCH based) pipes.
 *
 * The sequence is strictly ordered: pipe timings/M-N/pipeconf are
 * programmed first, then the encoder pre-enable hooks run, the FDI PLL
 * and panel fitter are set up, the LUTs are loaded, the CPU pipe is
 * enabled, and only then the PCH transcoder and the encoders proper.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The CRTC must be fully off before a modeset enable. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N values only matter when driving a PCH port. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-enable the underrun reporting that was suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
3845
3846 /* IPS only exists on ULT machines and is tied to pipe A. */
3847 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3848 {
3849         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3850 }
3851
3852 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3853                                             enum pipe pipe, bool apply)
3854 {
3855         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3856         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3857
3858         if (apply)
3859                 val |= mask;
3860         else
3861                 val &= ~mask;
3862
3863         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3864 }
3865
3866 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3867 {
3868         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3869         enum pipe pipe = crtc->pipe;
3870         u32 val;
3871
3872         val = MBUS_DBOX_A_CREDIT(2);
3873
3874         if (INTEL_GEN(dev_priv) >= 12) {
3875                 val |= MBUS_DBOX_BW_CREDIT(2);
3876                 val |= MBUS_DBOX_B_CREDIT(12);
3877         } else {
3878                 val |= MBUS_DBOX_BW_CREDIT(1);
3879                 val |= MBUS_DBOX_B_CREDIT(8);
3880         }
3881
3882         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3883 }
3884
3885 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3886 {
3887         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3888         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3889
3890         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3891                        HSW_LINETIME(crtc_state->linetime) |
3892                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
3893 }
3894
3895 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3896 {
3897         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3898         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3899         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3900         u32 val;
3901
3902         val = intel_de_read(dev_priv, reg);
3903         val &= ~HSW_FRAME_START_DELAY_MASK;
3904         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3905         intel_de_write(dev_priv, reg, val);
3906 }
3907
/*
 * Pre-enable handling for a bigjoiner pipe (ICL+ DDI).
 *
 * For the master pipe only the VDSC enable (skipped earlier in
 * pre-enable) is performed here. For a slave pipe the pre-pll/pll/
 * pre-enable sequence is first run on the linked master, then DSC is
 * enabled on the slave itself.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* A slave pipe defers to the CRTC it is linked with. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Look up the encoder feeding the master pipe. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}
}
3946
/*
 * Full modeset enable for HSW+ (DDI based) pipes, including gen9+ and
 * bigjoiner configurations. As with the other crtc_enable hooks the
 * step ordering is mandated by hardware; do not reorder casually.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* The CRTC must be fully off before a modeset enable. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/* Bigjoiner pipes share a combined pre-enable sequence. */
	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder programming is skipped for DSI and bigjoiner slaves. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/*
	 * Slave pipes are enabled along with the master; only the
	 * tracepoint and vblank bookkeeping happen here.
	 */
	if (new_crtc_state->bigjoiner_slave) {
		trace_intel_pipe_enable(crtc);
		intel_crtc_vblank_on(new_crtc_state);
	}

	intel_encoders_enable(state, crtc);

	/* Undo WA #1180 once the pipe has produced a frame. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
4046
4047 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4048 {
4049         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4050         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4051         enum pipe pipe = crtc->pipe;
4052
4053         /* To avoid upsetting the power well on haswell only disable the pfit if
4054          * it's in use. The hw state code will make sure we get this right. */
4055         if (!old_crtc_state->pch_pfit.enabled)
4056                 return;
4057
4058         intel_de_write(dev_priv, PF_CTL(pipe), 0);
4059         intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
4060         intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
4061 }
4062
/*
 * Full modeset disable for ILK-style (PCH based) pipes: encoders,
 * pipe, pfit, FDI, then the PCH transcoder and its clock routing.
 * Mirrors the ordering of ilk_crtc_enable() in reverse.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	/* Tear down the PCH transcoder and its clock selection. */
	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-enable the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4119
/*
 * CRTC disable for HSW+ (DDI based) pipes. Only the encoder disable
 * phases run here; the remaining teardown presumably happens inside
 * the DDI encoder hooks — hence the FIXME about collapsing the hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
4130
4131 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
4132 {
4133         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4134         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4135
4136         if (!crtc_state->gmch_pfit.control)
4137                 return;
4138
4139         /*
4140          * The panel fitter should only be adjusted whilst the pipe is disabled,
4141          * according to register description and PRM.
4142          */
4143         drm_WARN_ON(&dev_priv->drm,
4144                     intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
4145         assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
4146
4147         intel_de_write(dev_priv, PFIT_PGM_RATIOS,
4148                        crtc_state->gmch_pfit.pgm_ratios);
4149         intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
4150
4151         /* Border color in case we don't scale up to the full screen. Black by
4152          * default, change to something else for debugging. */
4153         intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
4154 }
4155
4156 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
4157 {
4158         if (phy == PHY_NONE)
4159                 return false;
4160         else if (IS_ALDERLAKE_S(dev_priv))
4161                 return phy <= PHY_E;
4162         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
4163                 return phy <= PHY_D;
4164         else if (IS_JSL_EHL(dev_priv))
4165                 return phy <= PHY_C;
4166         else if (INTEL_GEN(dev_priv) >= 11)
4167                 return phy <= PHY_B;
4168         else
4169                 return false;
4170 }
4171
4172 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
4173 {
4174         if (IS_TIGERLAKE(dev_priv))
4175                 return phy >= PHY_D && phy <= PHY_I;
4176         else if (IS_ICELAKE(dev_priv))
4177                 return phy >= PHY_C && phy <= PHY_F;
4178         else
4179                 return false;
4180 }
4181
4182 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
4183 {
4184         if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
4185                 return PHY_B + port - PORT_TC1;
4186         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
4187                 return PHY_C + port - PORT_TC1;
4188         else if (IS_JSL_EHL(i915) && port == PORT_D)
4189                 return PHY_A;
4190
4191         return PHY_A + port - PORT_A;
4192 }
4193
4194 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
4195 {
4196         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
4197                 return TC_PORT_NONE;
4198
4199         if (INTEL_GEN(dev_priv) >= 12)
4200                 return TC_PORT_1 + port - PORT_TC1;
4201         else
4202                 return TC_PORT_1 + port - PORT_C;
4203 }
4204
/*
 * Map a DDI port to the power domain covering its lanes. Unknown
 * ports are flagged via MISSING_CASE and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
4231
/*
 * Return the power domain for a digital port's AUX channel. Type-C
 * ports operating in TBT-alt mode use the dedicated *_TBT AUX
 * domains; all other ports go through the legacy aux_ch mapping.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			/* Arbitrary but harmless fallback for bad state. */
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
4263
/*
 * Convert an aux_ch to its power domain without considering TBT mode.
 * For a TBT-aware lookup use intel_aux_power_domain() instead.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		/* Arbitrary but harmless fallback for bad state. */
		return POWER_DOMAIN_AUX_A;
	}
}
4295
4296 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
4297 {
4298         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4299         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4300         struct drm_encoder *encoder;
4301         enum pipe pipe = crtc->pipe;
4302         u64 mask;
4303         enum transcoder transcoder = crtc_state->cpu_transcoder;
4304
4305         if (!crtc_state->hw.active)
4306                 return 0;
4307
4308         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
4309         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
4310         if (crtc_state->pch_pfit.enabled ||
4311             crtc_state->pch_pfit.force_thru)
4312                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4313
4314         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
4315                                   crtc_state->uapi.encoder_mask) {
4316                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4317
4318                 mask |= BIT_ULL(intel_encoder->power_domain);
4319         }
4320
4321         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
4322                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
4323
4324         if (crtc_state->shared_dpll)
4325                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
4326
4327         if (crtc_state->dsc.compression_enable)
4328                 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
4329
4330         return mask;
4331 }
4332
4333 static u64
4334 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
4335 {
4336         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4337         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4338         enum intel_display_power_domain domain;
4339         u64 domains, new_domains, old_domains;
4340
4341         domains = get_crtc_power_domains(crtc_state);
4342
4343         new_domains = domains & ~crtc->enabled_power_domains.mask;
4344         old_domains = crtc->enabled_power_domains.mask & ~domains;
4345
4346         for_each_power_domain(domain, new_domains)
4347                 intel_display_power_get_in_set(dev_priv,
4348                                                &crtc->enabled_power_domains,
4349                                                domain);
4350
4351         return old_domains;
4352 }
4353
4354 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
4355                                            u64 domains)
4356 {
4357         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
4358                                             &crtc->enabled_power_domains,
4359                                             domains);
4360 }
4361
/*
 * Full modeset enable for VLV/CHV pipes: program timings/config,
 * bring up the DPLL via the platform-specific helpers, then pfit,
 * LUTs, watermarks, pipe and encoders — in that order.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The CRTC must be fully off before a modeset enable. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/*
	 * NOTE(review): called unconditionally here, unlike
	 * i9xx_crtc_enable() — assumes initial_watermarks is always
	 * set on vlv/chv; confirm against the watermark vfunc setup.
	 */
	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
4416
4417 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
4418 {
4419         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4420         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4421
4422         intel_de_write(dev_priv, FP0(crtc->pipe),
4423                        crtc_state->dpll_hw_state.fp0);
4424         intel_de_write(dev_priv, FP1(crtc->pipe),
4425                        crtc_state->dpll_hw_state.fp1);
4426 }
4427
/*
 * Full modeset enable for gen2-4 (GMCH) pipes: PLL dividers, timings,
 * pipeconf, PLL, pfit, LUTs, watermarks, pipe, encoders — in order.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The CRTC must be fully off before a modeset enable. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* No underrun reporting support on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}
4479
4480 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4481 {
4482         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4483         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4484
4485         if (!old_crtc_state->gmch_pfit.control)
4486                 return;
4487
4488         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
4489
4490         drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
4491                     intel_de_read(dev_priv, PFIT_CONTROL));
4492         intel_de_write(dev_priv, PFIT_CONTROL, 0);
4493 }
4494
/*
 * Full modeset disable for gen2-4 (GMCH) pipes: encoders, pipe, pfit,
 * then the platform-specific DPLL teardown (skipped for DSI, which
 * owns its PLL).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI PLLs are managed by the DSI code; others are torn down here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* No underrun reporting support on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/*
	 * clock the pipe down to 640x480@60 to potentially save power —
	 * note this deliberately re-enables the pipe on I830.
	 */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
4541
/*
 * Disable a crtc directly, outside of a full atomic commit.  Used to
 * sanitize hw/sw state mismatches found at driver load/resume time.
 * A minimal drm_atomic_state is constructed only so that the
 * crtc_disable() hook can run; all the software bookkeeping (crtc
 * state, cdclk/dbuf/bandwidth tracking, power domain references) is
 * then cleared by hand.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off any planes still enabled on this pipe first. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* Dummy atomic state; only needed to call the crtc_disable() hook. */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Reset the crtc's uapi/hw state to "fully off". */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach all encoders that were feeding this crtc. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Release the power domain references taken while the pipe was active. */
	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Drop this pipe from the global cdclk/dbuf/bandwidth bookkeeping. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
4626
4627 /*
4628  * turn all crtc's off, but do not adjust state
4629  * This has to be paired with a call to intel_modeset_setup_hw_state.
4630  */
4631 int intel_display_suspend(struct drm_device *dev)
4632 {
4633         struct drm_i915_private *dev_priv = to_i915(dev);
4634         struct drm_atomic_state *state;
4635         int ret;
4636
4637         state = drm_atomic_helper_suspend(dev);
4638         ret = PTR_ERR_OR_ZERO(state);
4639         if (ret)
4640                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4641                         ret);
4642         else
4643                 dev_priv->modeset_restore_state = state;
4644         return ret;
4645 }
4646
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Tear down the base encoder, then free the containing object. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
4654
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency): a connector that reports itself enabled in hw
 * must have an active attached crtc and matching atomic encoder/crtc links,
 * while a hw-disabled connector must not be tied to an active crtc.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* The encoder/crtc link checks below are skipped for DP MST. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
4693
4694 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4695 {
4696         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4697         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4698
4699         /* IPS only exists on ULT machines and is tied to pipe A. */
4700         if (!hsw_crtc_supports_ips(crtc))
4701                 return false;
4702
4703         if (!dev_priv->params.enable_ips)
4704                 return false;
4705
4706         if (crtc_state->pipe_bpp > 24)
4707                 return false;
4708
4709         /*
4710          * We compare against max which means we must take
4711          * the increased cdclk requirement into account when
4712          * calculating the new cdclk.
4713          *
4714          * Should measure whether using a lower cdclk w/o IPS
4715          */
4716         if (IS_BROADWELL(dev_priv) &&
4717             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
4718                 return false;
4719
4720         return true;
4721 }
4722
/*
 * Compute crtc_state->ips_enabled.  IPS stays off when the pipe isn't
 * IPS capable, when pipe CRC collection is active, when only the cursor
 * plane is enabled, or (BDW only) when the pixel rate exceeds 95% of
 * the logical cdclk.  Returns 0, or a negative error code if the cdclk
 * state can't be acquired.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
4764
4765 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4766 {
4767         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4768
4769         /* GDG double wide on either pipe, otherwise pipe A only */
4770         return INTEL_GEN(dev_priv) < 4 &&
4771                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
4772 }
4773
4774 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4775 {
4776         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4777         unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
4778
4779         /*
4780          * We only use IF-ID interlacing. If we ever use
4781          * PF-ID we'll need to adjust the pixel_rate here.
4782          */
4783
4784         if (!crtc_state->pch_pfit.enabled)
4785                 return pixel_rate;
4786
4787         pipe_w = crtc_state->pipe_src_w;
4788         pipe_h = crtc_state->pipe_src_h;
4789
4790         pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
4791         pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
4792
4793         if (pipe_w < pfit_w)
4794                 pipe_w = pfit_w;
4795         if (pipe_h < pfit_h)
4796                 pipe_h = pfit_h;
4797
4798         if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
4799                         !pfit_w || !pfit_h))
4800                 return pixel_rate;
4801
4802         return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
4803                        pfit_w * pfit_h);
4804 }
4805
4806 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4807                                          const struct drm_display_mode *timings)
4808 {
4809         mode->hdisplay = timings->crtc_hdisplay;
4810         mode->htotal = timings->crtc_htotal;
4811         mode->hsync_start = timings->crtc_hsync_start;
4812         mode->hsync_end = timings->crtc_hsync_end;
4813
4814         mode->vdisplay = timings->crtc_vdisplay;
4815         mode->vtotal = timings->crtc_vtotal;
4816         mode->vsync_start = timings->crtc_vsync_start;
4817         mode->vsync_end = timings->crtc_vsync_end;
4818
4819         mode->flags = timings->flags;
4820         mode->type = DRM_MODE_TYPE_DRIVER;
4821
4822         mode->clock = timings->crtc_clock;
4823
4824         drm_mode_set_name(mode);
4825 }
4826
4827 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4828 {
4829         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4830
4831         if (HAS_GMCH(dev_priv))
4832                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
4833                 crtc_state->pixel_rate =
4834                         crtc_state->hw.pipe_mode.crtc_clock;
4835         else
4836                 crtc_state->pixel_rate =
4837                         ilk_pipe_pixel_rate(crtc_state);
4838 }
4839
/*
 * Derive hw.pipe_mode, hw.mode and pixel_rate from the hw.adjusted_mode
 * that was read out from hardware.  With bigjoiner each pipe only drives
 * half of the transcoder mode, so the per-pipe horizontal timings and
 * clock are halved, and the user-visible hdisplay is doubled back up.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
	intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);

	intel_crtc_compute_pixel_rate(crtc_state);

	drm_mode_copy(mode, adjusted_mode);
	/* << bigjoiner doubles hdisplay back to the full mode width */
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
4871
/*
 * Read out the encoder's hw state into @crtc_state, then refresh the
 * derived mode/pixel-rate fields that depend on it.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
4879
/*
 * Validate and adjust the crtc configuration for a modeset: derive
 * pipe_mode from the adjusted mode (halved horizontally for bigjoiner),
 * enforce the dotclock limit (enabling double wide mode on pre-gen4
 * when possible), reject odd pipe source widths in double wide and dual
 * link LVDS configurations, reject a zero hsync front porch on g4x+,
 * then compute the pixel rate and, for PCH encoders, the FDI config.
 *
 * Returns 0 on success, -EINVAL if the mode can't be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4960
4961 static void
4962 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4963 {
4964         while (*num > DATA_LINK_M_N_MASK ||
4965                *den > DATA_LINK_M_N_MASK) {
4966                 *num >>= 1;
4967                 *den >>= 1;
4968         }
4969 }
4970
4971 static void compute_m_n(unsigned int m, unsigned int n,
4972                         u32 *ret_m, u32 *ret_n,
4973                         bool constant_n)
4974 {
4975         /*
4976          * Several DP dongles in particular seem to be fussy about
4977          * too large link M/N values. Give N value as 0x8000 that
4978          * should be acceptable by specific devices. 0x8000 is the
4979          * specified fixed N value for asynchronous clock mode,
4980          * which the devices expect also in synchronous clock mode.
4981          */
4982         if (constant_n)
4983                 *ret_n = DP_LINK_CONSTANT_N_VALUE;
4984         else
4985                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4986
4987         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4988         intel_reduce_m_n_ratio(ret_m, ret_n);
4989 }
4990
/*
 * Compute the link M/N values for the given configuration.  gmch_m/n
 * encode the data-link ratio (bits per pixel * pixel clock vs. total
 * link bandwidth across all lanes), link_m/n the pixel vs. link clock
 * ratio.  @constant_n forces the fixed N value some DP dongles require,
 * and @fec_enable inflates the data clock for FEC overhead.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
5012
/*
 * Sanitize vbt.lvds_use_ssc against what the BIOS actually programmed:
 * on IBX/CPT PCHs, trust the hw SSC1 enable bit over the VBT setting.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
5035
/* Program the PCH transcoder M1/N1 data and link ratio registers. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the DATA_M register with the M value. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
5049
5050 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
5051                                  enum transcoder transcoder)
5052 {
5053         if (IS_HASWELL(dev_priv))
5054                 return transcoder == TRANSCODER_EDP;
5055
5056         /*
5057          * Strictly speaking some registers are available before
5058          * gen7, but we only support DRRS on gen7+
5059          */
5060         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
5061 }
5062
/*
 * Program the CPU transcoder M/N values.  On gen5+ these live in
 * per-transcoder PIPE_DATA/PIPE_LINK registers, with an optional
 * second (M2/N2) set used for DRRS; on older platforms the g4x-style
 * per-pipe registers are written instead.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
5104
/*
 * Program the DP M/N set selected by @m_n.  M1_N1 programs the primary
 * values (plus M2/N2 where supported, for DRRS); M2_N2 programs the
 * DRRS divider into the M1_N1 registers on transcoders that lack a
 * hardware M2/N2 set.  NOTE: the PCH encoder path always programs
 * dp_m_n regardless of which set was requested.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
5130
/*
 * Write the adjusted mode timings into the transcoder H/V timing
 * registers.  All hw fields store value-minus-one.  Interlaced modes
 * need vtotal/vblank_end reduced by one halfline (the hw adds it back)
 * and a vsync shift programmed on gen4+.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* gen2/3 have no VSYNCSHIFT register */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
5188
5189 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
5190 {
5191         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5192         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5193         enum pipe pipe = crtc->pipe;
5194
5195         /* pipesrc controls the size that is scaled from, which should
5196          * always be the user's requested size.
5197          */
5198         intel_de_write(dev_priv, PIPESRC(pipe),
5199                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
5200 }
5201
5202 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
5203 {
5204         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5205         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5206
5207         if (IS_GEN(dev_priv, 2))
5208                 return false;
5209
5210         if (INTEL_GEN(dev_priv) >= 9 ||
5211             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
5212                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
5213         else
5214                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
5215 }
5216
/*
 * Read the transcoder H/V timing registers back into the adjusted
 * mode.  All hw fields store value-minus-one, hence the +1s.  DSI
 * transcoders have no HBLANK/VBLANK registers.  For interlaced modes
 * one line is added back to vtotal/vblank_end, mirroring the -1
 * applied when the timings were programmed.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
5261
5262 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
5263                                     struct intel_crtc_state *pipe_config)
5264 {
5265         struct drm_device *dev = crtc->base.dev;
5266         struct drm_i915_private *dev_priv = to_i915(dev);
5267         u32 tmp;
5268
5269         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
5270         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5271         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5272 }
5273
5274 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
5275 {
5276         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5277         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5278         u32 pipeconf;
5279
5280         pipeconf = 0;
5281
5282         /* we keep both pipes enabled on 830 */
5283         if (IS_I830(dev_priv))
5284                 pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
5285
5286         if (crtc_state->double_wide)
5287                 pipeconf |= PIPECONF_DOUBLE_WIDE;
5288
5289         /* only g4x and later have fancy bpc/dither controls */
5290         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5291             IS_CHERRYVIEW(dev_priv)) {
5292                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
5293                 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
5294                         pipeconf |= PIPECONF_DITHER_EN |
5295                                     PIPECONF_DITHER_TYPE_SP;
5296
5297                 switch (crtc_state->pipe_bpp) {
5298                 case 18:
5299                         pipeconf |= PIPECONF_6BPC;
5300                         break;
5301                 case 24:
5302                         pipeconf |= PIPECONF_8BPC;
5303                         break;
5304                 case 30:
5305                         pipeconf |= PIPECONF_10BPC;
5306                         break;
5307                 default:
5308                         /* Case prevented by intel_choose_pipe_bpp_dither. */
5309                         BUG();
5310                 }
5311         }
5312
5313         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
5314                 if (INTEL_GEN(dev_priv) < 4 ||
5315                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5316                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5317                 else
5318                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
5319         } else {
5320                 pipeconf |= PIPECONF_PROGRESSIVE;
5321         }
5322
5323         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5324              crtc_state->limited_color_range)
5325                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5326
5327         pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5328
5329         pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5330
5331         intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
5332         intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
5333 }
5334
5335 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
5336 {
5337         if (IS_I830(dev_priv))
5338                 return false;
5339
5340         return INTEL_GEN(dev_priv) >= 4 ||
5341                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
5342 }
5343
5344 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
5345 {
5346         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5347         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5348         u32 tmp;
5349
5350         if (!i9xx_has_pfit(dev_priv))
5351                 return;
5352
5353         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
5354         if (!(tmp & PFIT_ENABLE))
5355                 return;
5356
5357         /* Check whether the pfit is attached to our pipe. */
5358         if (INTEL_GEN(dev_priv) < 4) {
5359                 if (crtc->pipe != PIPE_B)
5360                         return;
5361         } else {
5362                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5363                         return;
5364         }
5365
5366         crtc_state->gmch_pfit.control = tmp;
5367         crtc_state->gmch_pfit.pgm_ratios =
5368                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
5369 }
5370
5371 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5372                                struct intel_crtc_state *pipe_config)
5373 {
5374         struct drm_device *dev = crtc->base.dev;
5375         struct drm_i915_private *dev_priv = to_i915(dev);
5376         enum pipe pipe = crtc->pipe;
5377         struct dpll clock;
5378         u32 mdiv;
5379         int refclk = 100000;
5380
5381         /* In case of DSI, DPLL will not be used */
5382         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5383                 return;
5384
5385         vlv_dpio_get(dev_priv);
5386         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
5387         vlv_dpio_put(dev_priv);
5388
5389         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5390         clock.m2 = mdiv & DPIO_M2DIV_MASK;
5391         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5392         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5393         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5394
5395         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
5396 }
5397
5398 static void chv_crtc_clock_get(struct intel_crtc *crtc,
5399                                struct intel_crtc_state *pipe_config)
5400 {
5401         struct drm_device *dev = crtc->base.dev;
5402         struct drm_i915_private *dev_priv = to_i915(dev);
5403         enum pipe pipe = crtc->pipe;
5404         enum dpio_channel port = vlv_pipe_to_channel(pipe);
5405         struct dpll clock;
5406         u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
5407         int refclk = 100000;
5408
5409         /* In case of DSI, DPLL will not be used */
5410         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
5411                 return;
5412
5413         vlv_dpio_get(dev_priv);
5414         cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
5415         pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
5416         pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
5417         pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
5418         pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
5419         vlv_dpio_put(dev_priv);
5420
5421         clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
5422         clock.m2 = (pll_dw0 & 0xff) << 22;
5423         if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
5424                 clock.m2 |= pll_dw2 & 0x3fffff;
5425         clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
5426         clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
5427         clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
5428
5429         pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
5430 }
5431
5432 static enum intel_output_format
5433 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
5434 {
5435         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5436         u32 tmp;
5437
5438         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5439
5440         if (tmp & PIPEMISC_YUV420_ENABLE) {
5441                 /* We support 4:2:0 in full blend mode only */
5442                 drm_WARN_ON(&dev_priv->drm,
5443                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
5444
5445                 return INTEL_OUTPUT_FORMAT_YCBCR420;
5446         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
5447                 return INTEL_OUTPUT_FORMAT_YCBCR444;
5448         } else {
5449                 return INTEL_OUTPUT_FORMAT_RGB;
5450         }
5451 }
5452
5453 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
5454 {
5455         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5456         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5457         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5458         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
5459         u32 tmp;
5460
5461         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
5462
5463         if (tmp & DISPPLANE_GAMMA_ENABLE)
5464                 crtc_state->gamma_enable = true;
5465
5466         if (!HAS_GMCH(dev_priv) &&
5467             tmp & DISPPLANE_PIPE_CSC_ENABLE)
5468                 crtc_state->csc_enable = true;
5469 }
5470
/*
 * Read the full hardware state of a GMCH (gen2-4, VLV/CHV) pipe into
 * @pipe_config. Returns true if the pipe is enabled, false if it is
 * disabled or its power domain is off.
 *
 * Grabs a wakeref for the pipe's power domain only if the domain is
 * already enabled; the wakeref is released before returning.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* GMCH pipes have a fixed 1:1 pipe:transcoder mapping and no shared DPLL */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* only g4x/VLV/CHV expose bpc in PIPECONF (mirrors i9xx_set_pipeconf()) */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Recover the pixel multiplier; its location depends on the platform */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
5592
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * IBX/CPT based on which panel outputs exist and whether any DPLL is
 * already consuming the SSC source. The register is reprogrammed one
 * source at a time with settle delays, as required by the hardware.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* only port A eDP is driven from the CPU side here */
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: VBT tells us whether an external CK505 provides the clock */
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* keep the SSC source alive for the PLL that still uses it */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* the step-by-step programming must converge on the precomputed value */
	BUG_ON(val != final);
}
5761
5762 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5763 {
5764         u32 tmp;
5765
5766         tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5767         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5768         intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5769
5770         if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5771                         FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5772                 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5773
5774         tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5775         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5776         intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5777
5778         if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5779                          FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5780                 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5781 }
5782
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY over the SBI MPHY sideband with the magic
 * values required by the workaround. The 0x20xx/0x21xx register pairs
 * appear to receive identical programming — presumably one per FDI
 * channel (NOTE(review): pairing inferred from the addresses, confirm
 * against the workaround documentation). Do not alter the values.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
5857
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* sanitize impossible parameter combinations before touching HW */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* un-disable the SSC block, keeping PATHALT set for now */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	/* settle delay mandated by the BSpec sequence */
	udelay(24);

	if (with_spread) {
		/* clearing PATHALT enables the spread path */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* the buffer-config register differs between LP and non-LP PCH */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5903
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* clear the buffer-config bit first (register differs on LP PCH) */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* PATHALT must be set, with a settle delay, before DISABLE */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
5929
/*
 * Map a CLKOUT_DP bend amount (-50..50 in steps of 5) to an index
 * into sscdivintphase[] below.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values for each bend step. Adjacent steps share
 * a value; the intermediate half-step is realized via the dither
 * phase written in lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
5955
5956 /*
5957  * Bend CLKOUT_DP
5958  * steps -50 to 50 inclusive, in steps of 5
5959  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5960  * change in clock period = -(steps / 10) * 5.787 ps
5961  */
5962 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5963 {
5964         u32 tmp;
5965         int idx = BEND_IDX(steps);
5966
5967         if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5968                 return;
5969
5970         if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5971                 return;
5972
5973         mutex_lock(&dev_priv->sb_lock);
5974
5975         if (steps % 10 != 0)
5976                 tmp = 0xAAAAAAAB;
5977         else
5978                 tmp = 0x00000000;
5979         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5980
5981         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5982         tmp &= 0xffff0000;
5983         tmp |= sscdivintphase[idx];
5984         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5985
5986         mutex_unlock(&dev_priv->sb_lock);
5987 }
5988
5989 #undef BEND_IDX
5990
5991 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5992 {
5993         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5994         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5995
5996         if ((ctl & SPLL_PLL_ENABLE) == 0)
5997                 return false;
5998
5999         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
6000             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
6001                 return true;
6002
6003         if (IS_BROADWELL(dev_priv) &&
6004             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
6005                 return true;
6006
6007         return false;
6008 }
6009
6010 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
6011                                enum intel_dpll_id id)
6012 {
6013         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
6014         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
6015
6016         if ((ctl & WRPLL_PLL_ENABLE) == 0)
6017                 return false;
6018
6019         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
6020                 return true;
6021
6022         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
6023             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
6024             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
6025                 return true;
6026
6027         return false;
6028 }
6029
/*
 * Configure the LPT PCH reference clocks based on the outputs present,
 * taking care not to disable a PCH SSC reference that the BIOS may
 * have left in use by an active PLL.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	/* The analog (CRT) encoder is the only FDI user considered here */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	/* Record every PLL currently running off the PCH SSC reference */
	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	/* Some PLL still needs the PCH SSC reference - leave it alone */
	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		/* FDI needs CLKOUT_DP with no clock bending */
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}
6087
6088 /*
6089  * Initialize reference clocks when the driver loads
6090  */
6091 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
6092 {
6093         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
6094                 ilk_init_pch_refclk(dev_priv);
6095         else if (HAS_PCH_LPT(dev_priv))
6096                 lpt_init_pch_refclk(dev_priv);
6097 }
6098
/*
 * Program PIPECONF for an ILK-style (PCH) pipe from the committed crtc
 * state: bpc, dithering, interlace mode, color range/space, gamma mode
 * and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* NOTE(review): SDVO appears to handle limited range itself - confirm */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* framestart_delay is 1-based, the register field is 0-based */
	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	/* Posting read to flush the write */
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
6155
/*
 * Program PIPECONF for a HSW+ transcoder. Only dithering (HSW),
 * interlace mode and the HSW-only output colorspace bit are set here.
 */
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	/* Posting read to flush the write */
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}
6178
/*
 * Program PIPEMISC for a BDW+ pipe: dither bpc/type, output colorspace,
 * YUV420 mode and various gen-specific bits.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * On ICL+ use HDR mode precision when only HDR-capable planes
	 * (plus the cursor) are active.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
6224
6225 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
6226 {
6227         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6228         u32 tmp;
6229
6230         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
6231
6232         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
6233         case PIPEMISC_DITHER_6_BPC:
6234                 return 18;
6235         case PIPEMISC_DITHER_8_BPC:
6236                 return 24;
6237         case PIPEMISC_DITHER_10_BPC:
6238                 return 30;
6239         case PIPEMISC_DITHER_12_BPC:
6240                 return 36;
6241         default:
6242                 MISSING_CASE(tmp);
6243                 return 0;
6244         }
6245 }
6246
/*
 * Compute the number of lanes required to carry @target_clock pixels
 * at @bpp bits per pixel over a link whose per-lane symbol rate is
 * @link_bw.
 *
 * Account for spread spectrum to avoid
 * oversubscribing the link. Max center spread
 * is 2.5%; use 5% for safety's sake.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bw = link_bw * 8;

	/* Round up: a partially used lane is still a lane */
	return (bps + lane_bw - 1) / lane_bw;
}
6257
/*
 * Read back the link/data M1/N1 values (and TU size) from the PCH
 * transcoder registers for @crtc's pipe.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	/* The data M register also carries the TU size - mask it out */
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	/* The hardware stores TU size as size - 1 */
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
6273
/*
 * Read back the link/data M/N values (and TU size) for a cpu
 * transcoder. On gen5+ the per-transcoder registers are used, and a
 * second M2/N2 set is read if requested and the transcoder has one;
 * older (g4x-style) hardware uses per-pipe registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		/* The data M register also carries the TU size - mask it out */
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* The hardware stores TU size as size - 1 */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
6318
6319 void intel_dp_get_m_n(struct intel_crtc *crtc,
6320                       struct intel_crtc_state *pipe_config)
6321 {
6322         if (pipe_config->has_pch_encoder)
6323                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6324         else
6325                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6326                                              &pipe_config->dp_m_n,
6327                                              &pipe_config->dp_m2_n2);
6328 }
6329
/*
 * Read out the FDI M/N values for state readout. FDI has no second
 * M2/N2 set, hence the NULL.
 */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
6336
6337 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
6338                                   u32 pos, u32 size)
6339 {
6340         drm_rect_init(&crtc_state->pch_pfit.dst,
6341                       pos >> 16, pos & 0xffff,
6342                       size >> 16, size & 0xffff);
6343 }
6344
/*
 * Read out the pipe scaler (panel fitter) state on SKL+. A scaler used
 * as a panel fitter is enabled with no plane selected in its control
 * register.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* Skip scalers that are disabled or bound to a plane */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		/* Only record the first matching scaler */
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
6379
/*
 * Read out the panel fitter state on ILK-style hardware (dedicated
 * per-pipe PF registers rather than SKL+ scalers).
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignements of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
6405
6406 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
6407                                 struct intel_crtc_state *pipe_config)
6408 {
6409         struct drm_device *dev = crtc->base.dev;
6410         struct drm_i915_private *dev_priv = to_i915(dev);
6411         enum intel_display_power_domain power_domain;
6412         intel_wakeref_t wakeref;
6413         u32 tmp;
6414         bool ret;
6415
6416         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
6417         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
6418         if (!wakeref)
6419                 return false;
6420
6421         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6422         pipe_config->shared_dpll = NULL;
6423
6424         ret = false;
6425         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
6426         if (!(tmp & PIPECONF_ENABLE))
6427                 goto out;
6428
6429         switch (tmp & PIPECONF_BPC_MASK) {
6430         case PIPECONF_6BPC:
6431                 pipe_config->pipe_bpp = 18;
6432                 break;
6433         case PIPECONF_8BPC:
6434                 pipe_config->pipe_bpp = 24;
6435                 break;
6436         case PIPECONF_10BPC:
6437                 pipe_config->pipe_bpp = 30;
6438                 break;
6439         case PIPECONF_12BPC:
6440                 pipe_config->pipe_bpp = 36;
6441                 break;
6442         default:
6443                 break;
6444         }
6445
6446         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
6447                 pipe_config->limited_color_range = true;
6448
6449         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
6450         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
6451         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
6452                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6453                 break;
6454         default:
6455                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6456                 break;
6457         }
6458
6459         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
6460                 PIPECONF_GAMMA_MODE_SHIFT;
6461
6462         pipe_config->csc_mode = intel_de_read(dev_priv,
6463                                               PIPE_CSC_MODE(crtc->pipe));
6464
6465         i9xx_get_pipe_color_config(pipe_config);
6466         intel_color_get_config(pipe_config);
6467
6468         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6469                 struct intel_shared_dpll *pll;
6470                 enum intel_dpll_id pll_id;
6471                 bool pll_active;
6472
6473                 pipe_config->has_pch_encoder = true;
6474
6475                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
6476                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6477                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
6478
6479                 ilk_get_fdi_m_n_config(crtc, pipe_config);
6480
6481                 if (HAS_PCH_IBX(dev_priv)) {
6482                         /*
6483                          * The pipe->pch transcoder and pch transcoder->pll
6484                          * mapping is fixed.
6485                          */
6486                         pll_id = (enum intel_dpll_id) crtc->pipe;
6487                 } else {
6488                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
6489                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6490                                 pll_id = DPLL_ID_PCH_PLL_B;
6491                         else
6492                                 pll_id= DPLL_ID_PCH_PLL_A;
6493                 }
6494
6495                 pipe_config->shared_dpll =
6496                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
6497                 pll = pipe_config->shared_dpll;
6498
6499                 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6500                                                      &pipe_config->dpll_hw_state);
6501                 drm_WARN_ON(dev, !pll_active);
6502
6503                 tmp = pipe_config->dpll_hw_state.dpll;
6504                 pipe_config->pixel_multiplier =
6505                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6506                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6507
6508                 ilk_pch_clock_get(crtc, pipe_config);
6509         } else {
6510                 pipe_config->pixel_multiplier = 1;
6511         }
6512
6513         intel_get_transcoder_timings(crtc, pipe_config);
6514         intel_get_pipe_src_size(crtc, pipe_config);
6515
6516         ilk_get_pfit_config(pipe_config);
6517
6518         ret = true;
6519
6520 out:
6521         intel_display_power_put(dev_priv, power_domain, wakeref);
6522
6523         return ret;
6524 }
6525
/*
 * Read out which DPLL drives @port on DG1 and record its hw state in
 * the crtc state.
 */
static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 clk_sel;

	/* Decode the PLL id from the phy's clock select field */
	clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
	id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);

	if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
6553
/*
 * Read out which DPLL drives @port on ICL+ (including RKL and ADL-S
 * variants) and record its hw state in the crtc state.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	i915_reg_t reg;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		u32 mask, shift;

		/* The clock select register/field layout varies by platform */
		if (IS_ALDERLAKE_S(dev_priv)) {
			reg = ADLS_DPCLKA_CFGCR(phy);
			mask = ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy);
			shift = ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy);
		} else if (IS_ROCKETLAKE(dev_priv)) {
			reg = ICL_DPCLKA_CFGCR0;
			mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		} else {
			reg = ICL_DPCLKA_CFGCR0;
			mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		}

		temp = intel_de_read(dev_priv, reg) & mask;
		id = temp >> shift;
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		/* Type-C phys use either the MG PHY PLL or the TBT PLL */
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
6614
/*
 * Read out which shared DPLL drives @port on CNL from DPCLKA_CFGCR0
 * and record its hw state.
 */
static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	/* NOTE(review): bounds use the SKL DPLL ids - confirm intended for CNL */
	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}
6636
6637 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
6638                                 enum port port,
6639                                 struct intel_crtc_state *pipe_config)
6640 {
6641         struct intel_shared_dpll *pll;
6642         enum intel_dpll_id id;
6643         bool pll_active;
6644
6645         switch (port) {
6646         case PORT_A:
6647                 id = DPLL_ID_SKL_DPLL0;
6648                 break;
6649         case PORT_B:
6650                 id = DPLL_ID_SKL_DPLL1;
6651                 break;
6652         case PORT_C:
6653                 id = DPLL_ID_SKL_DPLL2;
6654                 break;
6655         default:
6656                 drm_err(&dev_priv->drm, "Incorrect port type\n");
6657                 return;
6658         }
6659
6660         pll = intel_get_shared_dpll_by_id(dev_priv, id);
6661
6662         pipe_config->shared_dpll = pll;
6663         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6664                                              &pipe_config->dpll_hw_state);
6665         drm_WARN_ON(&dev_priv->drm, !pll_active);
6666 }
6667
6668 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
6669                             struct intel_crtc_state *pipe_config)
6670 {
6671         struct intel_shared_dpll *pll;
6672         enum intel_dpll_id id;
6673         bool pll_active;
6674         u32 temp;
6675
6676         temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
6677         id = temp >> (port * 3 + 1);
6678
6679         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
6680                 return;
6681
6682         pll = intel_get_shared_dpll_by_id(dev_priv, id);
6683
6684         pipe_config->shared_dpll = pll;
6685         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6686                                              &pipe_config->dpll_hw_state);
6687         drm_WARN_ON(&dev_priv->drm, !pll_active);
6688 }
6689
/*
 * Read out which clock source (WRPLL/SPLL/LCPLL) drives @port on
 * HSW/BDW from PORT_CLK_SEL and record the corresponding shared DPLL
 * hw state.
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
	bool pll_active;

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		fallthrough;
	case PORT_CLK_SEL_NONE:
		/* Port has no clock selected - nothing to read out */
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}
6731
/*
 * Determine which cpu transcoder feeds @crtc (accounting for the eDP
 * and, on gen11+, DSI transcoders), grab a power domain reference for
 * it, and return whether the pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe the panel transcoder is routed to */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
6819
/*
 * Determine whether one of the BXT DSI transcoders is driving @crtc.
 *
 * Probes both DSI ports (PORT_A -> TRANSCODER_DSI_A, PORT_C ->
 * TRANSCODER_DSI_C). Any transcoder power domain reference taken while
 * probing is recorded in @power_domain_set for the caller to release.
 *
 * Returns true iff pipe_config->cpu_transcoder was set to a DSI
 * transcoder that is enabled and routed to this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		/* Skip a port whose transcoder power well is off. */
		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Only claim the transcoder if it is routed to this pipe. */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
6865
/*
 * Read out which DDI port is feeding @crtc's transcoder and fill in the
 * corresponding DPLL state. On pre-gen9 platforms this also reads out
 * FDI/PCH encoder state when the pipe is wired to DDI E.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoder A is driven by port A, otherwise port B. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		/* The port field encoding differs on gen12+. */
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Dispatch to the platform specific DPLL readout helper. */
	if (IS_DG1(dev_priv))
		dg1_get_ddi_pll(dev_priv, port, pipe_config);
	else if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	/*
	 * Haswell has only one FDI/PCH transcoder A, and it is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
6917
/*
 * Read out the full hardware state of @crtc into @pipe_config on
 * HSW+ platforms.
 *
 * Power domain references taken while probing are collected in a local
 * set and dropped before returning. Returns true iff the pipe is
 * active (including the bigjoiner-slave case, where most state cannot
 * be read out and is flagged via PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE).
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	/* Bail out entirely if the pipe's own power well is off. */
	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* A pipe can alternatively be driven by a BXT DSI transcoder. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* Output colorspace readout differs between HSW and BDW+. */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	/* gamma/csc enable bits live in SKL_BOTTOM_COLOR on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* Panel fitter state is only readable when its power well is on. */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power domain reference taken while probing. */
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
7041
7042 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
7043 {
7044         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7045         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7046
7047         if (!i915->display.get_pipe_config(crtc, crtc_state))
7048                 return false;
7049
7050         crtc_state->hw.active = true;
7051
7052         intel_crtc_readout_derived_state(crtc_state);
7053
7054         return true;
7055 }
7056
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
7062
7063 struct drm_framebuffer *
7064 intel_framebuffer_create(struct drm_i915_gem_object *obj,
7065                          struct drm_mode_fb_cmd2 *mode_cmd)
7066 {
7067         struct intel_framebuffer *intel_fb;
7068         int ret;
7069
7070         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7071         if (!intel_fb)
7072                 return ERR_PTR(-ENOMEM);
7073
7074         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
7075         if (ret)
7076                 goto err;
7077
7078         return &intel_fb->base;
7079
7080 err:
7081         kfree(intel_fb);
7082         return ERR_PTR(ret);
7083 }
7084
7085 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
7086                                         struct drm_crtc *crtc)
7087 {
7088         struct drm_plane *plane;
7089         struct drm_plane_state *plane_state;
7090         int ret, i;
7091
7092         ret = drm_atomic_add_affected_planes(state, crtc);
7093         if (ret)
7094                 return ret;
7095
7096         for_each_new_plane_in_state(state, plane, plane_state, i) {
7097                 if (plane_state->crtc != crtc)
7098                         continue;
7099
7100                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
7101                 if (ret)
7102                         return ret;
7103
7104                 drm_atomic_set_fb_for_plane(plane_state, NULL);
7105         }
7106
7107         return 0;
7108 }
7109
/*
 * intel_get_load_detect_pipe - light up a pipe on @connector for load
 * detection
 * @connector: connector to probe
 * @old: on success, holds the atomic state needed to undo the modeset
 * @ctx: modeset acquire context held by the caller
 *
 * Commits a fixed VESA 640x480 mode (with all planes disabled) on a
 * CRTC driving @connector so the encoder's presence-detect logic can
 * run.
 *
 * Returns true on success (call intel_release_load_detect_pipe() with
 * @old to restore), false if no usable pipe was found or the commit
 * failed, or -EDEADLK if the caller must back off and retry locking.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		/* i tracks the crtc index for the possible_crtcs bitmask. */
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Skip crtcs that are already in use. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to commit the test mode, one to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Planes stay off during load detection. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current state so it can be restored later. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated so the caller can back off. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
7268
7269 void intel_release_load_detect_pipe(struct drm_connector *connector,
7270                                     struct intel_load_detect_pipe *old,
7271                                     struct drm_modeset_acquire_ctx *ctx)
7272 {
7273         struct intel_encoder *intel_encoder =
7274                 intel_attached_encoder(to_intel_connector(connector));
7275         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
7276         struct drm_encoder *encoder = &intel_encoder->base;
7277         struct drm_atomic_state *state = old->restore_state;
7278         int ret;
7279
7280         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7281                     connector->base.id, connector->name,
7282                     encoder->base.id, encoder->name);
7283
7284         if (!state)
7285                 return;
7286
7287         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
7288         if (ret)
7289                 drm_dbg_kms(&i915->drm,
7290                             "Couldn't release load detect pipe: %i\n", ret);
7291         drm_atomic_state_put(state);
7292 }
7293
7294 static int i9xx_pll_refclk(struct drm_device *dev,
7295                            const struct intel_crtc_state *pipe_config)
7296 {
7297         struct drm_i915_private *dev_priv = to_i915(dev);
7298         u32 dpll = pipe_config->dpll_hw_state.dpll;
7299
7300         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7301                 return dev_priv->vbt.lvds_ssc_freq;
7302         else if (HAS_PCH_SPLIT(dev_priv))
7303                 return 120000;
7304         else if (!IS_GEN(dev_priv, 2))
7305                 return 96000;
7306         else
7307                 return 48000;
7308 }
7309
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the FP and DPLL register values previously read into
 * pipe_config->dpll_hw_state back into m/n/p dividers and computes
 * pipe_config->port_clock from them (includes pixel_multiplier; the
 * encoder's get_config() derives adjusted_mode.crtc_clock from it).
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP register selected by the DPLL's rate-select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview uses different N/M2 field layouts in the FP register. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored one-hot; ffs() recovers the divider value. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: LVDS presence changes the P1/P2 encoding. */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
7401
7402 int intel_dotclock_calculate(int link_freq,
7403                              const struct intel_link_m_n *m_n)
7404 {
7405         /*
7406          * The calculation for the data clock is:
7407          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7408          * But we want to avoid losing precison if possible, so:
7409          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7410          *
7411          * and the link clock is simpler:
7412          * link_clock = (m * link_clock) / n
7413          */
7414
7415         if (!m_n->link_n)
7416                 return 0;
7417
7418         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
7419 }
7420
7421 static void ilk_pch_clock_get(struct intel_crtc *crtc,
7422                               struct intel_crtc_state *pipe_config)
7423 {
7424         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7425
7426         /* read out port_clock from the DPLL */
7427         i9xx_crtc_clock_get(crtc, pipe_config);
7428
7429         /*
7430          * In case there is an active pipe without active ports,
7431          * we may need some idea for the dotclock anyway.
7432          * Calculate one based on the FDI configuration.
7433          */
7434         pipe_config->hw.adjusted_mode.crtc_clock =
7435                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
7436                                          &pipe_config->fdi_m_n);
7437 }
7438
7439 /* Returns the currently programmed mode of the given encoder. */
7440 struct drm_display_mode *
7441 intel_encoder_current_mode(struct intel_encoder *encoder)
7442 {
7443         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
7444         struct intel_crtc_state *crtc_state;
7445         struct drm_display_mode *mode;
7446         struct intel_crtc *crtc;
7447         enum pipe pipe;
7448
7449         if (!encoder->get_hw_state(encoder, &pipe))
7450                 return NULL;
7451
7452         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7453
7454         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7455         if (!mode)
7456                 return NULL;
7457
7458         crtc_state = intel_crtc_state_alloc(crtc);
7459         if (!crtc_state) {
7460                 kfree(mode);
7461                 return NULL;
7462         }
7463
7464         if (!intel_crtc_get_pipe_config(crtc_state)) {
7465                 kfree(crtc_state);
7466                 kfree(mode);
7467                 return NULL;
7468         }
7469
7470         intel_encoder_get_config(encoder, crtc_state);
7471
7472         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
7473
7474         kfree(crtc_state);
7475
7476         return mode;
7477 }
7478
7479 /**
7480  * intel_wm_need_update - Check whether watermarks need updating
7481  * @cur: current plane state
7482  * @new: new plane state
7483  *
7484  * Check current plane state versus the new one to determine whether
7485  * watermarks need to be recalculated.
7486  *
7487  * Returns true or false.
7488  */
7489 static bool intel_wm_need_update(const struct intel_plane_state *cur,
7490                                  struct intel_plane_state *new)
7491 {
7492         /* Update watermarks on tiling or size changes. */
7493         if (new->uapi.visible != cur->uapi.visible)
7494                 return true;
7495
7496         if (!cur->hw.fb || !new->hw.fb)
7497                 return false;
7498
7499         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
7500             cur->hw.rotation != new->hw.rotation ||
7501             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
7502             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
7503             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
7504             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
7505                 return true;
7506
7507         return false;
7508 }
7509
7510 static bool needs_scaling(const struct intel_plane_state *state)
7511 {
7512         int src_w = drm_rect_width(&state->uapi.src) >> 16;
7513         int src_h = drm_rect_height(&state->uapi.src) >> 16;
7514         int dst_w = drm_rect_width(&state->uapi.dst);
7515         int dst_h = drm_rect_height(&state->uapi.dst);
7516
7517         return (src_w != dst_w || src_h != dst_h);
7518 }
7519
7520 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
7521                                     struct intel_crtc_state *crtc_state,
7522                                     const struct intel_plane_state *old_plane_state,
7523                                     struct intel_plane_state *plane_state)
7524 {
7525         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7526         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7527         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7528         bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7529         bool was_crtc_enabled = old_crtc_state->hw.active;
7530         bool is_crtc_enabled = crtc_state->hw.active;
7531         bool turn_off, turn_on, visible, was_visible;
7532         int ret;
7533
7534         if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
7535                 ret = skl_update_scaler_plane(crtc_state, plane_state);
7536                 if (ret)
7537                         return ret;
7538         }
7539
7540         was_visible = old_plane_state->uapi.visible;
7541         visible = plane_state->uapi.visible;
7542
7543         if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
7544                 was_visible = false;
7545
7546         /*
7547          * Visibility is calculated as if the crtc was on, but
7548          * after scaler setup everything depends on it being off
7549          * when the crtc isn't active.
7550          *
7551          * FIXME this is wrong for watermarks. Watermarks should also
7552          * be computed as if the pipe would be active. Perhaps move
7553          * per-plane wm computation to the .check_plane() hook, and
7554          * only combine the results from all planes in the current place?
7555          */
7556         if (!is_crtc_enabled) {
7557                 intel_plane_set_invisible(crtc_state, plane_state);
7558                 visible = false;
7559         }
7560
7561         if (!was_visible && !visible)
7562                 return 0;
7563
7564         turn_off = was_visible && (!visible || mode_changed);
7565         turn_on = visible && (!was_visible || mode_changed);
7566
7567         drm_dbg_atomic(&dev_priv->drm,
7568                        "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
7569                        crtc->base.base.id, crtc->base.name,
7570                        plane->base.base.id, plane->base.name,
7571                        was_visible, visible,
7572                        turn_off, turn_on, mode_changed);
7573
7574         if (turn_on) {
7575                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
7576                         crtc_state->update_wm_pre = true;
7577
7578                 /* must disable cxsr around plane enable/disable */
7579                 if (plane->id != PLANE_CURSOR)
7580                         crtc_state->disable_cxsr = true;
7581         } else if (turn_off) {
7582                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
7583                         crtc_state->update_wm_post = true;
7584
7585                 /* must disable cxsr around plane enable/disable */
7586                 if (plane->id != PLANE_CURSOR)
7587                         crtc_state->disable_cxsr = true;
7588         } else if (intel_wm_need_update(old_plane_state, plane_state)) {
7589                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
7590                         /* FIXME bollocks */
7591                         crtc_state->update_wm_pre = true;
7592                         crtc_state->update_wm_post = true;
7593                 }
7594         }
7595
7596         if (visible || was_visible)
7597                 crtc_state->fb_bits |= plane->frontbuffer_bit;
7598
7599         /*
7600          * ILK/SNB DVSACNTR/Sprite Enable
7601          * IVB SPR_CTL/Sprite Enable
7602          * "When in Self Refresh Big FIFO mode, a write to enable the
7603          *  plane will be internally buffered and delayed while Big FIFO
7604          *  mode is exiting."
7605          *
7606          * Which means that enabling the sprite can take an extra frame
7607          * when we start in big FIFO mode (LP1+). Thus we need to drop
7608          * down to LP0 and wait for vblank in order to make sure the
7609          * sprite gets enabled on the next vblank after the register write.
7610          * Doing otherwise would risk enabling the sprite one frame after
7611          * we've already signalled flip completion. We can resume LP1+
7612          * once the sprite has been enabled.
7613          *
7614          *
7615          * WaCxSRDisabledForSpriteScaling:ivb
7616          * IVB SPR_SCALE/Scaling Enable
7617          * "Low Power watermarks must be disabled for at least one
7618          *  frame before enabling sprite scaling, and kept disabled
7619          *  until sprite scaling is disabled."
7620          *
7621          * ILK/SNB DVSASCALE/Scaling Enable
7622          * "When in Self Refresh Big FIFO mode, scaling enable will be
7623          *  masked off while Big FIFO mode is exiting."
7624          *
7625          * Despite the w/a only being listed for IVB we assume that
7626          * the ILK/SNB note has similar ramifications, hence we apply
7627          * the w/a on all three platforms.
7628          *
7629          * With experimental results seems this is needed also for primary
7630          * plane, not only sprite plane.
7631          */
7632         if (plane->id != PLANE_CURSOR &&
7633             (IS_GEN_RANGE(dev_priv, 5, 6) ||
7634              IS_IVYBRIDGE(dev_priv)) &&
7635             (turn_on || (!needs_scaling(old_plane_state) &&
7636                          needs_scaling(plane_state))))
7637                 crtc_state->disable_lp_wm = true;
7638
7639         return 0;
7640 }
7641
7642 static bool encoders_cloneable(const struct intel_encoder *a,
7643                                const struct intel_encoder *b)
7644 {
7645         /* masks could be asymmetric, so check both ways */
7646         return a == b || (a->cloneable & (1 << b->type) &&
7647                           b->cloneable & (1 << a->type));
7648 }
7649
/*
 * Check that @encoder is mutually cloneable with every other encoder
 * being attached to @crtc in this atomic state. Returns true when all
 * pairs are compatible.
 */
static bool check_single_encoder_cloning(struct intel_atomic_state *state,
                                         struct intel_crtc *crtc,
                                         struct intel_encoder *encoder)
{
        struct intel_encoder *source_encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
                /* only consider connectors that will be driven by this crtc */
                if (connector_state->crtc != &crtc->base)
                        continue;

                source_encoder =
                        to_intel_encoder(connector_state->best_encoder);
                if (!encoders_cloneable(encoder, source_encoder))
                        return false;
        }

        return true;
}
7671
/*
 * Pull the linked ("Y") plane of every planar YUV plane pair into the
 * atomic state, so that updating one half of the pair always updates
 * the other. Returns 0 on success or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state, *linked_plane_state;
        int i;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                linked = plane_state->planar_linked_plane;

                if (!linked)
                        continue;

                linked_plane_state = intel_atomic_get_plane_state(state, linked);
                if (IS_ERR(linked_plane_state))
                        return PTR_ERR(linked_plane_state);

                /* links must be symmetric, with exactly one slave per pair */
                drm_WARN_ON(state->base.dev,
                            linked_plane_state->planar_linked_plane != plane);
                drm_WARN_ON(state->base.dev,
                            linked_plane_state->planar_slave == plane_state->planar_slave);
        }

        return 0;
}
7696
/*
 * On gen11+ a plane scanning out a planar (NV12-family) format needs a
 * second plane to fetch the Y component. Tear down all stale
 * master/slave plane links for this crtc, then pair each planar plane
 * in the new state with a free Y-capable plane and mirror the relevant
 * state into it. Returns 0 on success, -EINVAL when no Y plane is
 * free, or another negative error code.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        if (INTEL_GEN(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        /* an invisible ex-slave no longer scans anything out */
                        crtc_state->enabled_planes &= ~BIT(plane->id);
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                }

                plane_state->planar_slave = false;
        }

        /* nothing to pair up if no plane uses a planar format */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* find a Y-capable plane that is not already in use */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(dev_priv, linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Need %d free Y planes for planar YUV\n",
                                    hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->enabled_planes |= BIT(linked->id);
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
                            linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->view = plane_state->view;
                memcpy(linked_state->color_plane, plane_state->color_plane,
                       sizeof(linked_state->color_plane));

                intel_plane_copy_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                /*
                 * HDR planes select which plane feeds the chroma
                 * upsampler via cus_ctl; map the chosen Y plane to the
                 * corresponding CUS plane selector.
                 */
                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
                        else if (linked->id == PLANE_SPRITE3)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
                        else if (linked->id == PLANE_SPRITE2)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
7796
7797 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7798 {
7799         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7800         struct intel_atomic_state *state =
7801                 to_intel_atomic_state(new_crtc_state->uapi.state);
7802         const struct intel_crtc_state *old_crtc_state =
7803                 intel_atomic_get_old_crtc_state(state, crtc);
7804
7805         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7806 }
7807
7808 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7809 {
7810         const struct drm_display_mode *pipe_mode =
7811                 &crtc_state->hw.pipe_mode;
7812         int linetime_wm;
7813
7814         if (!crtc_state->hw.enable)
7815                 return 0;
7816
7817         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7818                                         pipe_mode->crtc_clock);
7819
7820         return min(linetime_wm, 0x1ff);
7821 }
7822
7823 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7824                                const struct intel_cdclk_state *cdclk_state)
7825 {
7826         const struct drm_display_mode *pipe_mode =
7827                 &crtc_state->hw.pipe_mode;
7828         int linetime_wm;
7829
7830         if (!crtc_state->hw.enable)
7831                 return 0;
7832
7833         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7834                                         cdclk_state->logical.cdclk);
7835
7836         return min(linetime_wm, 0x1ff);
7837 }
7838
/*
 * Compute the gen9+ linetime watermark, based on the crtc's pixel rate
 * (which accounts for scaling) rather than the raw pixel clock.
 * Returns 0 for a disabled crtc, otherwise the value clamped to the
 * 9-bit register field.
 */
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *pipe_mode =
                &crtc_state->hw.pipe_mode;
        int linetime_wm;

        if (!crtc_state->hw.enable)
                return 0;

        linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
                                   crtc_state->pixel_rate);

        /* Display WA #1135: BXT:ALL GLK:ALL */
        if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
                linetime_wm /= 2;

        return min(linetime_wm, 0x1ff);
}
7859
/*
 * Fill in crtc_state->linetime (and, on crtcs that support IPS, the
 * cdclk-based ips_linetime) for the new state. Returns 0 on success or
 * a negative error code.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        const struct intel_cdclk_state *cdclk_state;

        if (INTEL_GEN(dev_priv) >= 9)
                crtc_state->linetime = skl_linetime_wm(crtc_state);
        else
                crtc_state->linetime = hsw_linetime_wm(crtc_state);

        if (!hsw_crtc_supports_ips(crtc))
                return 0;

        /* the IPS watermark depends on cdclk, so pull in the cdclk state */
        cdclk_state = intel_atomic_get_cdclk_state(state);
        if (IS_ERR(cdclk_state))
                return PTR_ERR(cdclk_state);

        crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
                                                       cdclk_state);

        return 0;
}
7885
/*
 * Per-crtc atomic check: derive clocks, color management, watermarks,
 * scalers, IPS and linetime state for the crtc's new state. Returns 0
 * on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool mode_changed = intel_crtc_needs_modeset(crtc_state);
        int ret;

        /* pre-ilk (except g4x): refresh watermarks after turning a crtc off */
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
            mode_changed && !crtc_state->hw.active)
                crtc_state->update_wm_post = true;

        /*
         * Compute the new clock/PLL configuration. A bigjoiner slave
         * gets its clocks from the master, and the crtc must not
         * already have a shared DPLL assigned at this point.
         */
        if (mode_changed && crtc_state->hw.enable &&
            dev_priv->display.crtc_compute_clock &&
            !crtc_state->bigjoiner_slave &&
            !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
                ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
                if (ret)
                        return ret;
        }

        /*
         * May need to update pipe gamma enable bits
         * when C8 planes are getting enabled/disabled.
         */
        if (c8_planes_changed(crtc_state))
                crtc_state->uapi.color_mgmt_changed = true;

        if (mode_changed || crtc_state->update_pipe ||
            crtc_state->uapi.color_mgmt_changed) {
                ret = intel_color_check(crtc_state);
                if (ret)
                        return ret;
        }

        /* compute the target ("optimal") watermarks for the new state */
        if (dev_priv->display.compute_pipe_wm) {
                ret = dev_priv->display.compute_pipe_wm(crtc_state);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Target pipe watermarks are invalid\n");
                        return ret;
                }
        }

        if (dev_priv->display.compute_intermediate_wm) {
                if (drm_WARN_ON(&dev_priv->drm,
                                !dev_priv->display.compute_pipe_wm))
                        return 0;

                /*
                 * Calculate 'intermediate' watermarks that satisfy both the
                 * old state and the new state.  We can program these
                 * immediately.
                 */
                ret = dev_priv->display.compute_intermediate_wm(crtc_state);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
        }

        /* gen9+: pipe scaler configuration and scaler unit assignment */
        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed || crtc_state->update_pipe) {
                        ret = skl_update_scaler_crtc(crtc_state);
                        if (ret)
                                return ret;
                }

                ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
                if (ret)
                        return ret;
        }

        if (HAS_IPS(dev_priv)) {
                ret = hsw_compute_ips_config(crtc_state);
                if (ret)
                        return ret;
        }

        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                ret = hsw_compute_linetime_wm(state, crtc);
                if (ret)
                        return ret;

        }

        /* PSR2 selective fetch is only recomputed on non-modeset updates */
        if (!mode_changed) {
                ret = intel_psr2_sel_fetch_update(state, crtc);
                if (ret)
                        return ret;
        }

        return 0;
}
7983
/*
 * Synchronize each connector's atomic state with its legacy
 * encoder/crtc pointers, adjusting the connector reference held for
 * the crtc binding accordingly.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                /* drop the reference held for the old state's crtc binding */
                if (connector->base.state->crtc)
                        drm_connector_put(&connector->base);

                if (connector->base.encoder) {
                        connector->base.state->best_encoder =
                                connector->base.encoder;
                        connector->base.state->crtc =
                                connector->base.encoder->crtc;

                        /* bound to a crtc again -> take a new reference */
                        drm_connector_get(&connector->base);
                } else {
                        connector->base.state->best_encoder = NULL;
                        connector->base.state->crtc = NULL;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
}
8008
8009 static int
8010 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
8011                       struct intel_crtc_state *pipe_config)
8012 {
8013         struct drm_connector *connector = conn_state->connector;
8014         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
8015         const struct drm_display_info *info = &connector->display_info;
8016         int bpp;
8017
8018         switch (conn_state->max_bpc) {
8019         case 6 ... 7:
8020                 bpp = 6 * 3;
8021                 break;
8022         case 8 ... 9:
8023                 bpp = 8 * 3;
8024                 break;
8025         case 10 ... 11:
8026                 bpp = 10 * 3;
8027                 break;
8028         case 12 ... 16:
8029                 bpp = 12 * 3;
8030                 break;
8031         default:
8032                 MISSING_CASE(conn_state->max_bpc);
8033                 return -EINVAL;
8034         }
8035
8036         if (bpp < pipe_config->pipe_bpp) {
8037                 drm_dbg_kms(&i915->drm,
8038                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
8039                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
8040                             connector->base.id, connector->name,
8041                             bpp, 3 * info->bpc,
8042                             3 * conn_state->max_requested_bpc,
8043                             pipe_config->pipe_bpp);
8044
8045                 pipe_config->pipe_bpp = bpp;
8046         }
8047
8048         return 0;
8049 }
8050
8051 static int
8052 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8053                           struct intel_crtc_state *pipe_config)
8054 {
8055         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8056         struct drm_atomic_state *state = pipe_config->uapi.state;
8057         struct drm_connector *connector;
8058         struct drm_connector_state *connector_state;
8059         int bpp, i;
8060
8061         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8062             IS_CHERRYVIEW(dev_priv)))
8063                 bpp = 10*3;
8064         else if (INTEL_GEN(dev_priv) >= 5)
8065                 bpp = 12*3;
8066         else
8067                 bpp = 8*3;
8068
8069         pipe_config->pipe_bpp = bpp;
8070
8071         /* Clamp display bpp to connector max bpp */
8072         for_each_new_connector_in_state(state, connector, connector_state, i) {
8073                 int ret;
8074
8075                 if (connector_state->crtc != &crtc->base)
8076                         continue;
8077
8078                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
8079                 if (ret)
8080                         return ret;
8081         }
8082
8083         return 0;
8084 }
8085
/* Dump a mode's crtc_* (hardware) timings to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
                                    const struct drm_display_mode *mode)
{
        drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
                    "type: 0x%x flags: 0x%x\n",
                    mode->crtc_clock,
                    mode->crtc_hdisplay, mode->crtc_hsync_start,
                    mode->crtc_hsync_end, mode->crtc_htotal,
                    mode->crtc_vdisplay, mode->crtc_vsync_start,
                    mode->crtc_vsync_end, mode->crtc_vtotal,
                    mode->type, mode->flags);
}
8098
/*
 * Dump a link M/N configuration (identified by @id, e.g. "fdi" or
 * "dp m_n") to the KMS debug log.
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
                      const char *id, unsigned int lane_count,
                      const struct intel_link_m_n *m_n)
{
        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

        drm_dbg_kms(&i915->drm,
                    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                    id, lane_count,
                    m_n->gmch_m, m_n->gmch_n,
                    m_n->link_m, m_n->link_n, m_n->tu);
}
8112
8113 static void
8114 intel_dump_infoframe(struct drm_i915_private *dev_priv,
8115                      const union hdmi_infoframe *frame)
8116 {
8117         if (!drm_debug_enabled(DRM_UT_KMS))
8118                 return;
8119
8120         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
8121 }
8122
8123 static void
8124 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
8125                       const struct drm_dp_vsc_sdp *vsc)
8126 {
8127         if (!drm_debug_enabled(DRM_UT_KMS))
8128                 return;
8129
8130         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
8131 }
8132
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Names of the INTEL_OUTPUT_* types, indexed by enum value, for debug output. */
static const char * const output_type_str[] = {
        OUTPUT_TYPE(UNUSED),
        OUTPUT_TYPE(ANALOG),
        OUTPUT_TYPE(DVO),
        OUTPUT_TYPE(SDVO),
        OUTPUT_TYPE(LVDS),
        OUTPUT_TYPE(TVOUT),
        OUTPUT_TYPE(HDMI),
        OUTPUT_TYPE(DP),
        OUTPUT_TYPE(EDP),
        OUTPUT_TYPE(DSI),
        OUTPUT_TYPE(DDI),
        OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
8151
/*
 * Format the names of all output types set in @output_types into @buf
 * as a comma-separated list. @len is the total size of @buf including
 * the terminating NUL.
 */
static void snprintf_output_types(char *buf, size_t len,
                                  unsigned int output_types)
{
        char *str = buf;
        int i;

        str[0] = '\0';

        for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
                int r;

                if ((output_types & BIT(i)) == 0)
                        continue;

                r = snprintf(str, len, "%s%s",
                             str != buf ? "," : "", output_type_str[i]);
                /* snprintf returns the would-be length, so this detects truncation */
                if (r >= len)
                        break;
                str += r;
                len -= r;

                output_types &= ~BIT(i);
        }

        /* any bits still set have no name in output_type_str */
        WARN_ON_ONCE(output_types != 0);
}
8178
/* Names of the intel_output_format values, indexed by enum value. */
static const char * const output_format_str[] = {
        [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
        [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
        [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
        [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
8185
8186 static const char *output_formats(enum intel_output_format format)
8187 {
8188         if (format >= ARRAY_SIZE(output_format_str))
8189                 format = INTEL_OUTPUT_FORMAT_INVALID;
8190         return output_format_str[format];
8191 }
8192
/*
 * Dump a plane's framebuffer, rotation/scaler assignment and src/dst
 * rectangles to the KMS debug log.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *i915 = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        struct drm_format_name_buf format_name;

        if (!fb) {
                drm_dbg_kms(&i915->drm,
                            "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
                            plane->base.base.id, plane->base.name,
                            yesno(plane_state->uapi.visible));
                return;
        }

        drm_dbg_kms(&i915->drm,
                    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
                    plane->base.base.id, plane->base.name,
                    fb->base.id, fb->width, fb->height,
                    drm_get_format_name(fb->format->format, &format_name),
                    fb->modifier, yesno(plane_state->uapi.visible));
        drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
                    plane_state->hw.rotation, plane_state->scaler_id);
        /* src is printed in 16.16 fixed point, dst in integer pixels */
        if (plane_state->uapi.visible)
                drm_dbg_kms(&i915->drm,
                            "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
                            DRM_RECT_FP_ARG(&plane_state->uapi.src),
                            DRM_RECT_ARG(&plane_state->uapi.dst));
}
8222
8223 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
8224                                    struct intel_atomic_state *state,
8225                                    const char *context)
8226 {
8227         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8228         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8229         const struct intel_plane_state *plane_state;
8230         struct intel_plane *plane;
8231         char buf[64];
8232         int i;
8233
8234         drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
8235                     crtc->base.base.id, crtc->base.name,
8236                     yesno(pipe_config->hw.enable), context);
8237
8238         if (!pipe_config->hw.enable)
8239                 goto dump_planes;
8240
8241         snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
8242         drm_dbg_kms(&dev_priv->drm,
8243                     "active: %s, output_types: %s (0x%x), output format: %s\n",
8244                     yesno(pipe_config->hw.active),
8245                     buf, pipe_config->output_types,
8246                     output_formats(pipe_config->output_format));
8247
8248         drm_dbg_kms(&dev_priv->drm,
8249                     "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
8250                     transcoder_name(pipe_config->cpu_transcoder),
8251                     pipe_config->pipe_bpp, pipe_config->dither);
8252
8253         drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
8254                     transcoder_name(pipe_config->mst_master_transcoder));
8255
8256         drm_dbg_kms(&dev_priv->drm,
8257                     "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
8258                     transcoder_name(pipe_config->master_transcoder),
8259                     pipe_config->sync_mode_slaves_mask);
8260
8261         drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
8262                     pipe_config->bigjoiner_slave ? "slave" :
8263                     pipe_config->bigjoiner ? "master" : "no");
8264
8265         if (pipe_config->has_pch_encoder)
8266                 intel_dump_m_n_config(pipe_config, "fdi",
8267                                       pipe_config->fdi_lanes,
8268                                       &pipe_config->fdi_m_n);
8269
8270         if (intel_crtc_has_dp_encoder(pipe_config)) {
8271                 intel_dump_m_n_config(pipe_config, "dp m_n",
8272                                 pipe_config->lane_count, &pipe_config->dp_m_n);
8273                 if (pipe_config->has_drrs)
8274                         intel_dump_m_n_config(pipe_config, "dp m2_n2",
8275                                               pipe_config->lane_count,
8276                                               &pipe_config->dp_m2_n2);
8277         }
8278
8279         drm_dbg_kms(&dev_priv->drm,
8280                     "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
8281                     pipe_config->has_audio, pipe_config->has_infoframe,
8282                     pipe_config->infoframes.enable);
8283
8284         if (pipe_config->infoframes.enable &
8285             intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
8286                 drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
8287                             pipe_config->infoframes.gcp);
8288         if (pipe_config->infoframes.enable &
8289             intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
8290                 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
8291         if (pipe_config->infoframes.enable &
8292             intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
8293                 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
8294         if (pipe_config->infoframes.enable &
8295             intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
8296                 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
8297         if (pipe_config->infoframes.enable &
8298             intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
8299                 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
8300         if (pipe_config->infoframes.enable &
8301             intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
8302                 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
8303         if (pipe_config->infoframes.enable &
8304             intel_hdmi_infoframe_enable(DP_SDP_VSC))
8305                 intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
8306
8307         drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
8308                     yesno(pipe_config->vrr.enable),
8309                     pipe_config->vrr.vmin, pipe_config->vrr.vmax,
8310                     pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
8311                     intel_vrr_vmin_vblank_start(pipe_config),
8312                     intel_vrr_vmax_vblank_start(pipe_config));
8313
8314         drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
8315         drm_mode_debug_printmodeline(&pipe_config->hw.mode);
8316         drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
8317         drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
8318         intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
8319         drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
8320         drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
8321         intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
8322         drm_dbg_kms(&dev_priv->drm,
8323                     "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
8324                     pipe_config->port_clock,
8325                     pipe_config->pipe_src_w, pipe_config->pipe_src_h,
8326                     pipe_config->pixel_rate);
8327
8328         drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
8329                     pipe_config->linetime, pipe_config->ips_linetime);
8330
8331         if (INTEL_GEN(dev_priv) >= 9)
8332                 drm_dbg_kms(&dev_priv->drm,
8333                             "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
8334                             crtc->num_scalers,
8335                             pipe_config->scaler_state.scaler_users,
8336                             pipe_config->scaler_state.scaler_id);
8337
8338         if (HAS_GMCH(dev_priv))
8339                 drm_dbg_kms(&dev_priv->drm,
8340                             "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8341                             pipe_config->gmch_pfit.control,
8342                             pipe_config->gmch_pfit.pgm_ratios,
8343                             pipe_config->gmch_pfit.lvds_border_bits);
8344         else
8345                 drm_dbg_kms(&dev_priv->drm,
8346                             "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
8347                             DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
8348                             enableddisabled(pipe_config->pch_pfit.enabled),
8349                             yesno(pipe_config->pch_pfit.force_thru));
8350
8351         drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
8352                     pipe_config->ips_enabled, pipe_config->double_wide);
8353
8354         intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
8355
8356         if (IS_CHERRYVIEW(dev_priv))
8357                 drm_dbg_kms(&dev_priv->drm,
8358                             "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
8359                             pipe_config->cgm_mode, pipe_config->gamma_mode,
8360                             pipe_config->gamma_enable, pipe_config->csc_enable);
8361         else
8362                 drm_dbg_kms(&dev_priv->drm,
8363                             "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
8364                             pipe_config->csc_mode, pipe_config->gamma_mode,
8365                             pipe_config->gamma_enable, pipe_config->csc_enable);
8366
8367         drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
8368                     pipe_config->hw.degamma_lut ?
8369                     drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
8370                     pipe_config->hw.gamma_lut ?
8371                     drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
8372
8373 dump_planes:
8374         if (!state)
8375                 return;
8376
8377         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
8378                 if (plane->pipe == crtc->pipe)
8379                         intel_dump_plane_state(plane_state);
8380         }
8381 }
8382
8383 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
8384 {
8385         struct drm_device *dev = state->base.dev;
8386         struct drm_connector *connector;
8387         struct drm_connector_list_iter conn_iter;
8388         unsigned int used_ports = 0;
8389         unsigned int used_mst_ports = 0;
8390         bool ret = true;
8391
8392         /*
8393          * We're going to peek into connector->state,
8394          * hence connection_mutex must be held.
8395          */
8396         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
8397
8398         /*
8399          * Walk the connector list instead of the encoder
8400          * list to detect the problem on ddi platforms
8401          * where there's just one encoder per digital port.
8402          */
8403         drm_connector_list_iter_begin(dev, &conn_iter);
8404         drm_for_each_connector_iter(connector, &conn_iter) {
8405                 struct drm_connector_state *connector_state;
8406                 struct intel_encoder *encoder;
8407
8408                 connector_state =
8409                         drm_atomic_get_new_connector_state(&state->base,
8410                                                            connector);
8411                 if (!connector_state)
8412                         connector_state = connector->state;
8413
8414                 if (!connector_state->best_encoder)
8415                         continue;
8416
8417                 encoder = to_intel_encoder(connector_state->best_encoder);
8418
8419                 drm_WARN_ON(dev, !connector_state->crtc);
8420
8421                 switch (encoder->type) {
8422                 case INTEL_OUTPUT_DDI:
8423                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
8424                                 break;
8425                         fallthrough;
8426                 case INTEL_OUTPUT_DP:
8427                 case INTEL_OUTPUT_HDMI:
8428                 case INTEL_OUTPUT_EDP:
8429                         /* the same port mustn't appear more than once */
8430                         if (used_ports & BIT(encoder->port))
8431                                 ret = false;
8432
8433                         used_ports |= BIT(encoder->port);
8434                         break;
8435                 case INTEL_OUTPUT_DP_MST:
8436                         used_mst_ports |=
8437                                 1 << encoder->port;
8438                         break;
8439                 default:
8440                         break;
8441                 }
8442         }
8443         drm_connector_list_iter_end(&conn_iter);
8444
8445         /* can't mix MST and SST/HDMI on the same port */
8446         if (used_ports & used_mst_ports)
8447                 return false;
8448
8449         return ret;
8450 }
8451
8452 static void
8453 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
8454                                            struct intel_crtc_state *crtc_state)
8455 {
8456         const struct intel_crtc_state *from_crtc_state = crtc_state;
8457
8458         if (crtc_state->bigjoiner_slave) {
8459                 from_crtc_state = intel_atomic_get_new_crtc_state(state,
8460                                                                   crtc_state->bigjoiner_linked_crtc);
8461
8462                 /* No need to copy state if the master state is unchanged */
8463                 if (!from_crtc_state)
8464                         return;
8465         }
8466
8467         intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
8468 }
8469
8470 static void
8471 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
8472                                  struct intel_crtc_state *crtc_state)
8473 {
8474         crtc_state->hw.enable = crtc_state->uapi.enable;
8475         crtc_state->hw.active = crtc_state->uapi.active;
8476         crtc_state->hw.mode = crtc_state->uapi.mode;
8477         crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
8478         crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
8479
8480         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
8481 }
8482
8483 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
8484 {
8485         if (crtc_state->bigjoiner_slave)
8486                 return;
8487
8488         crtc_state->uapi.enable = crtc_state->hw.enable;
8489         crtc_state->uapi.active = crtc_state->hw.active;
8490         drm_WARN_ON(crtc_state->uapi.crtc->dev,
8491                     drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
8492
8493         crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
8494         crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
8495
8496         /* copy color blobs to uapi */
8497         drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
8498                                   crtc_state->hw.degamma_lut);
8499         drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
8500                                   crtc_state->hw.gamma_lut);
8501         drm_property_replace_blob(&crtc_state->uapi.ctm,
8502                                   crtc_state->hw.ctm);
8503 }
8504
/*
 * Initialize a bigjoiner slave's crtc_state by cloning the master's state
 * (@from_crtc_state), while preserving the slave's own uapi state, scaler
 * state, DPLL selection and CRC setting, then fixing up the fields that must
 * differ between master and slave.
 *
 * Returns 0 on success, -ENOMEM if the temporary copy cannot be allocated.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/* Clone the master's state wholesale into a scratch buffer. */
	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* ...but keep the slave's own per-crtc bits on top of the clone. */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* Drop blob references before crtc_state is overwritten below. */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	/* Plane tracking is recomputed per-pipe; clear the inherited masks. */
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	/* Point the slave back at the master crtc and mark it as a slave. */
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	/* The slave drives its own transcoder, not the master's. */
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}
8545
/*
 * Reset @crtc_state to a freshly-allocated (cleared) state while preserving
 * the fields that must survive across a compute pass: the uapi state, scaler
 * state, DPLL selections, CRC setting and (on pre-ilk GMCH platforms) the
 * watermarks. Finishes by re-syncing uapi -> hw state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* These platforms track watermarks in the crtc state; keep them. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Replace crtc_state wholesale with the cleared+preserved copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
8584
/*
 * Compute the full pipe configuration for a modeset: sanitize the requested
 * mode, derive the baseline bpp, let every encoder on this crtc adjust the
 * config, then run the crtc-level fixup. The encoder/crtc computation may be
 * retried once if the crtc reports a bandwidth-constrained config
 * (I915_DISPLAY_CONFIG_RETRY).
 *
 * Returns 0 on success, -EDEADLK for modeset-lock backoff, or another
 * negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* Default transcoder assignment: same index as the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the starting bpp for the debug message at the end. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is normal lock-contention backoff; don't log it. */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* At most one retry: the second pass must converge or we bail. */
	if (ret == I915_DISPLAY_CONFIG_RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
8723
8724 static int
8725 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
8726 {
8727         struct intel_atomic_state *state =
8728                 to_intel_atomic_state(crtc_state->uapi.state);
8729         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8730         struct drm_connector_state *conn_state;
8731         struct drm_connector *connector;
8732         int i;
8733
8734         for_each_new_connector_in_state(&state->base, connector,
8735                                         conn_state, i) {
8736                 struct intel_encoder *encoder =
8737                         to_intel_encoder(conn_state->best_encoder);
8738                 int ret;
8739
8740                 if (conn_state->crtc != &crtc->base ||
8741                     !encoder->compute_config_late)
8742                         continue;
8743
8744                 ret = encoder->compute_config_late(encoder, crtc_state,
8745                                                    conn_state);
8746                 if (ret)
8747                         return ret;
8748         }
8749
8750         return 0;
8751 }
8752
/*
 * Fuzzy comparison of two clock values (in kHz). Considered equal when the
 * difference is below roughly 5% of their sum. A zero clock only matches
 * another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	/* (diff + sum) * 100 / sum < 105  <=>  diff < 5% of sum */
	return ((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
8770
8771 static bool
8772 intel_compare_m_n(unsigned int m, unsigned int n,
8773                   unsigned int m2, unsigned int n2,
8774                   bool exact)
8775 {
8776         if (m == m2 && n == n2)
8777                 return true;
8778
8779         if (exact || !m || !n || !m2 || !n2)
8780                 return false;
8781
8782         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8783
8784         if (n > n2) {
8785                 while (n > n2) {
8786                         m2 <<= 1;
8787                         n2 <<= 1;
8788                 }
8789         } else if (n < n2) {
8790                 while (n < n2) {
8791                         m <<= 1;
8792                         n <<= 1;
8793                 }
8794         }
8795
8796         if (n != n2)
8797                 return false;
8798
8799         return intel_fuzzy_clock_check(m, m2);
8800 }
8801
8802 static bool
8803 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8804                        const struct intel_link_m_n *m2_n2,
8805                        bool exact)
8806 {
8807         return m_n->tu == m2_n2->tu &&
8808                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8809                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8810                 intel_compare_m_n(m_n->link_m, m_n->link_n,
8811                                   m2_n2->link_m, m2_n2->link_n, exact);
8812 }
8813
8814 static bool
8815 intel_compare_infoframe(const union hdmi_infoframe *a,
8816                         const union hdmi_infoframe *b)
8817 {
8818         return memcmp(a, b, sizeof(*a)) == 0;
8819 }
8820
8821 static bool
8822 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8823                          const struct drm_dp_vsc_sdp *b)
8824 {
8825         return memcmp(a, b, sizeof(*a)) == 0;
8826 }
8827
8828 static void
8829 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8830                                bool fastset, const char *name,
8831                                const union hdmi_infoframe *a,
8832                                const union hdmi_infoframe *b)
8833 {
8834         if (fastset) {
8835                 if (!drm_debug_enabled(DRM_UT_KMS))
8836                         return;
8837
8838                 drm_dbg_kms(&dev_priv->drm,
8839                             "fastset mismatch in %s infoframe\n", name);
8840                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8841                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8842                 drm_dbg_kms(&dev_priv->drm, "found:\n");
8843                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8844         } else {
8845                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8846                 drm_err(&dev_priv->drm, "expected:\n");
8847                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8848                 drm_err(&dev_priv->drm, "found:\n");
8849                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8850         }
8851 }
8852
8853 static void
8854 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8855                                 bool fastset, const char *name,
8856                                 const struct drm_dp_vsc_sdp *a,
8857                                 const struct drm_dp_vsc_sdp *b)
8858 {
8859         if (fastset) {
8860                 if (!drm_debug_enabled(DRM_UT_KMS))
8861                         return;
8862
8863                 drm_dbg_kms(&dev_priv->drm,
8864                             "fastset mismatch in %s dp sdp\n", name);
8865                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8866                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8867                 drm_dbg_kms(&dev_priv->drm, "found:\n");
8868                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8869         } else {
8870                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8871                 drm_err(&dev_priv->drm, "expected:\n");
8872                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8873                 drm_err(&dev_priv->drm, "found:\n");
8874                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8875         }
8876 }
8877
8878 static void __printf(4, 5)
8879 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8880                      const char *name, const char *format, ...)
8881 {
8882         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8883         struct va_format vaf;
8884         va_list args;
8885
8886         va_start(args, format);
8887         vaf.fmt = format;
8888         vaf.va = &args;
8889
8890         if (fastset)
8891                 drm_dbg_kms(&i915->drm,
8892                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8893                             crtc->base.base.id, crtc->base.name, name, &vaf);
8894         else
8895                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8896                         crtc->base.base.id, crtc->base.name, name, &vaf);
8897
8898         va_end(args);
8899 }
8900
8901 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8902 {
8903         if (dev_priv->params.fastboot != -1)
8904                 return dev_priv->params.fastboot;
8905
8906         /* Enable fastboot by default on Skylake and newer */
8907         if (INTEL_GEN(dev_priv) >= 9)
8908                 return true;
8909
8910         /* Enable fastboot by default on VLV and CHV */
8911         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8912                 return true;
8913
8914         /* Disabled by default on all others */
8915         return false;
8916 }
8917
8918 static bool
8919 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8920                           const struct intel_crtc_state *pipe_config,
8921                           bool fastset)
8922 {
8923         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8924         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8925         bool ret = true;
8926         u32 bp_gamma = 0;
8927         bool fixup_inherited = fastset &&
8928                 current_config->inherited && !pipe_config->inherited;
8929
8930         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8931                 drm_dbg_kms(&dev_priv->drm,
8932                             "initial modeset and fastboot not set\n");
8933                 ret = false;
8934         }
8935
8936 #define PIPE_CONF_CHECK_X(name) do { \
8937         if (current_config->name != pipe_config->name) { \
8938                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8939                                      "(expected 0x%08x, found 0x%08x)", \
8940                                      current_config->name, \
8941                                      pipe_config->name); \
8942                 ret = false; \
8943         } \
8944 } while (0)
8945
8946 #define PIPE_CONF_CHECK_I(name) do { \
8947         if (current_config->name != pipe_config->name) { \
8948                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8949                                      "(expected %i, found %i)", \
8950                                      current_config->name, \
8951                                      pipe_config->name); \
8952                 ret = false; \
8953         } \
8954 } while (0)
8955
8956 #define PIPE_CONF_CHECK_BOOL(name) do { \
8957         if (current_config->name != pipe_config->name) { \
8958                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
8959                                      "(expected %s, found %s)", \
8960                                      yesno(current_config->name), \
8961                                      yesno(pipe_config->name)); \
8962                 ret = false; \
8963         } \
8964 } while (0)
8965
8966 /*
8967  * Checks state where we only read out the enabling, but not the entire
8968  * state itself (like full infoframes or ELD for audio). These states
8969  * require a full modeset on bootup to fix up.
8970  */
8971 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8972         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8973                 PIPE_CONF_CHECK_BOOL(name); \
8974         } else { \
8975                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8976                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8977                                      yesno(current_config->name), \
8978                                      yesno(pipe_config->name)); \
8979                 ret = false; \
8980         } \
8981 } while (0)
8982
8983 #define PIPE_CONF_CHECK_P(name) do { \
8984         if (current_config->name != pipe_config->name) { \
8985                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8986                                      "(expected %p, found %p)", \
8987                                      current_config->name, \
8988                                      pipe_config->name); \
8989                 ret = false; \
8990         } \
8991 } while (0)
8992
8993 #define PIPE_CONF_CHECK_M_N(name) do { \
8994         if (!intel_compare_link_m_n(&current_config->name, \
8995                                     &pipe_config->name,\
8996                                     !fastset)) { \
8997                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8998                                      "(expected tu %i gmch %i/%i link %i/%i, " \
8999                                      "found tu %i, gmch %i/%i link %i/%i)", \
9000                                      current_config->name.tu, \
9001                                      current_config->name.gmch_m, \
9002                                      current_config->name.gmch_n, \
9003                                      current_config->name.link_m, \
9004                                      current_config->name.link_n, \
9005                                      pipe_config->name.tu, \
9006                                      pipe_config->name.gmch_m, \
9007                                      pipe_config->name.gmch_n, \
9008                                      pipe_config->name.link_m, \
9009                                      pipe_config->name.link_n); \
9010                 ret = false; \
9011         } \
9012 } while (0)
9013
9014 /* This is required for BDW+ where there is only one set of registers for
9015  * switching between high and low RR.
9016  * This macro can be used whenever a comparison has to be made between one
9017  * hw state and multiple sw state variables.
9018  */
9019 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
9020         if (!intel_compare_link_m_n(&current_config->name, \
9021                                     &pipe_config->name, !fastset) && \
9022             !intel_compare_link_m_n(&current_config->alt_name, \
9023                                     &pipe_config->name, !fastset)) { \
9024                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
9025                                      "(expected tu %i gmch %i/%i link %i/%i, " \
9026                                      "or tu %i gmch %i/%i link %i/%i, " \
9027                                      "found tu %i, gmch %i/%i link %i/%i)", \
9028                                      current_config->name.tu, \
9029                                      current_config->name.gmch_m, \
9030                                      current_config->name.gmch_n, \
9031                                      current_config->name.link_m, \
9032                                      current_config->name.link_n, \
9033                                      current_config->alt_name.tu, \
9034                                      current_config->alt_name.gmch_m, \
9035                                      current_config->alt_name.gmch_n, \
9036                                      current_config->alt_name.link_m, \
9037                                      current_config->alt_name.link_n, \
9038                                      pipe_config->name.tu, \
9039                                      pipe_config->name.gmch_m, \
9040                                      pipe_config->name.gmch_n, \
9041                                      pipe_config->name.link_m, \
9042                                      pipe_config->name.link_n); \
9043                 ret = false; \
9044         } \
9045 } while (0)
9046
9047 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
9048         if ((current_config->name ^ pipe_config->name) & (mask)) { \
9049                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
9050                                      "(%x) (expected %i, found %i)", \
9051                                      (mask), \
9052                                      current_config->name & (mask), \
9053                                      pipe_config->name & (mask)); \
9054                 ret = false; \
9055         } \
9056 } while (0)
9057
9058 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
9059         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
9060                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
9061                                      "(expected %i, found %i)", \
9062                                      current_config->name, \
9063                                      pipe_config->name); \
9064                 ret = false; \
9065         } \
9066 } while (0)
9067
9068 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
9069         if (!intel_compare_infoframe(&current_config->infoframes.name, \
9070                                      &pipe_config->infoframes.name)) { \
9071                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
9072                                                &current_config->infoframes.name, \
9073                                                &pipe_config->infoframes.name); \
9074                 ret = false; \
9075         } \
9076 } while (0)
9077
9078 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
9079         if (!current_config->has_psr && !pipe_config->has_psr && \
9080             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
9081                                       &pipe_config->infoframes.name)) { \
9082                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
9083                                                 &current_config->infoframes.name, \
9084                                                 &pipe_config->infoframes.name); \
9085                 ret = false; \
9086         } \
9087 } while (0)
9088
9089 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
9090         if (current_config->name1 != pipe_config->name1) { \
9091                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
9092                                 "(expected %i, found %i, won't compare lut values)", \
9093                                 current_config->name1, \
9094                                 pipe_config->name1); \
9095                 ret = false;\
9096         } else { \
9097                 if (!intel_color_lut_equal(current_config->name2, \
9098                                         pipe_config->name2, pipe_config->name1, \
9099                                         bit_precision)) { \
9100                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
9101                                         "hw_state doesn't match sw_state"); \
9102                         ret = false; \
9103                 } \
9104         } \
9105 } while (0)
9106
9107 #define PIPE_CONF_QUIRK(quirk) \
9108         ((current_config->quirks | pipe_config->quirks) & (quirk))
9109
9110         PIPE_CONF_CHECK_I(cpu_transcoder);
9111
9112         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
9113         PIPE_CONF_CHECK_I(fdi_lanes);
9114         PIPE_CONF_CHECK_M_N(fdi_m_n);
9115
9116         PIPE_CONF_CHECK_I(lane_count);
9117         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
9118
9119         if (INTEL_GEN(dev_priv) < 8) {
9120                 PIPE_CONF_CHECK_M_N(dp_m_n);
9121
9122                 if (current_config->has_drrs)
9123                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
9124         } else
9125                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
9126
9127         PIPE_CONF_CHECK_X(output_types);
9128
9129         /* FIXME do the readout properly and get rid of this quirk */
9130         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
9131                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
9132                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
9133                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
9134                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
9135                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
9136                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
9137
9138                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
9139                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
9140                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
9141                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
9142                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
9143                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
9144
9145                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
9146                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
9147                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
9148                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
9149                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
9150                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
9151
9152                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
9153                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
9154                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
9155                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
9156                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
9157                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
9158
9159                 PIPE_CONF_CHECK_I(pixel_multiplier);
9160
9161                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9162                                       DRM_MODE_FLAG_INTERLACE);
9163
9164                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
9165                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9166                                               DRM_MODE_FLAG_PHSYNC);
9167                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9168                                               DRM_MODE_FLAG_NHSYNC);
9169                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9170                                               DRM_MODE_FLAG_PVSYNC);
9171                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
9172                                               DRM_MODE_FLAG_NVSYNC);
9173                 }
9174         }
9175
9176         PIPE_CONF_CHECK_I(output_format);
9177         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
9178         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
9179             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
9180                 PIPE_CONF_CHECK_BOOL(limited_color_range);
9181
9182         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
9183         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
9184         PIPE_CONF_CHECK_BOOL(has_infoframe);
9185         /* FIXME do the readout properly and get rid of this quirk */
9186         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9187                 PIPE_CONF_CHECK_BOOL(fec_enable);
9188
9189         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
9190
9191         PIPE_CONF_CHECK_X(gmch_pfit.control);
9192         /* pfit ratios are autocomputed by the hw on gen4+ */
9193         if (INTEL_GEN(dev_priv) < 4)
9194                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
9195         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9196
9197         /*
9198          * Changing the EDP transcoder input mux
9199          * (A_ONOFF vs. A_ON) requires a full modeset.
9200          */
9201         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
9202
9203         if (!fastset) {
9204                 PIPE_CONF_CHECK_I(pipe_src_w);
9205                 PIPE_CONF_CHECK_I(pipe_src_h);
9206
9207                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
9208                 if (current_config->pch_pfit.enabled) {
9209                         PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
9210                         PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
9211                         PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
9212                         PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
9213                 }
9214
9215                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
9216                 /* FIXME do the readout properly and get rid of this quirk */
9217                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9218                         PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
9219
9220                 PIPE_CONF_CHECK_X(gamma_mode);
9221                 if (IS_CHERRYVIEW(dev_priv))
9222                         PIPE_CONF_CHECK_X(cgm_mode);
9223                 else
9224                         PIPE_CONF_CHECK_X(csc_mode);
9225                 PIPE_CONF_CHECK_BOOL(gamma_enable);
9226                 PIPE_CONF_CHECK_BOOL(csc_enable);
9227
9228                 PIPE_CONF_CHECK_I(linetime);
9229                 PIPE_CONF_CHECK_I(ips_linetime);
9230
9231                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
9232                 if (bp_gamma)
9233                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
9234         }
9235
9236         PIPE_CONF_CHECK_BOOL(double_wide);
9237
9238         PIPE_CONF_CHECK_P(shared_dpll);
9239
9240         /* FIXME do the readout properly and get rid of this quirk */
9241         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
9242                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
9243                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
9244                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
9245                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
9246                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
9247                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
9248                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
9249                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
9250                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
9251                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
9252                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
9253                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
9254                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
9255                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
9256                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
9257                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
9258                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
9259                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
9260                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
9261                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
9262                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
9263                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
9264                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
9265                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
9266                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
9267                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
9268                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
9269                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
9270                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
9271                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
9272                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
9273
9274                 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
9275                 PIPE_CONF_CHECK_X(dsi_pll.div);
9276
9277                 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
9278                         PIPE_CONF_CHECK_I(pipe_bpp);
9279
9280                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
9281                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
9282                 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9283
9284                 PIPE_CONF_CHECK_I(min_voltage_level);
9285         }
9286
9287         PIPE_CONF_CHECK_X(infoframes.enable);
9288         PIPE_CONF_CHECK_X(infoframes.gcp);
9289         PIPE_CONF_CHECK_INFOFRAME(avi);
9290         PIPE_CONF_CHECK_INFOFRAME(spd);
9291         PIPE_CONF_CHECK_INFOFRAME(hdmi);
9292         PIPE_CONF_CHECK_INFOFRAME(drm);
9293         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
9294
9295         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
9296         PIPE_CONF_CHECK_I(master_transcoder);
9297         PIPE_CONF_CHECK_BOOL(bigjoiner);
9298         PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
9299         PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
9300
9301         PIPE_CONF_CHECK_I(dsc.compression_enable);
9302         PIPE_CONF_CHECK_I(dsc.dsc_split);
9303         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
9304
9305         PIPE_CONF_CHECK_I(mst_master_transcoder);
9306
9307         PIPE_CONF_CHECK_BOOL(vrr.enable);
9308         PIPE_CONF_CHECK_I(vrr.vmin);
9309         PIPE_CONF_CHECK_I(vrr.vmax);
9310         PIPE_CONF_CHECK_I(vrr.flipline);
9311         PIPE_CONF_CHECK_I(vrr.pipeline_full);
9312
9313 #undef PIPE_CONF_CHECK_X
9314 #undef PIPE_CONF_CHECK_I
9315 #undef PIPE_CONF_CHECK_BOOL
9316 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
9317 #undef PIPE_CONF_CHECK_P
9318 #undef PIPE_CONF_CHECK_FLAGS
9319 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
9320 #undef PIPE_CONF_CHECK_COLOR_LUT
9321 #undef PIPE_CONF_QUIRK
9322
9323         return ret;
9324 }
9325
9326 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
9327                                            const struct intel_crtc_state *pipe_config)
9328 {
9329         if (pipe_config->has_pch_encoder) {
9330                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
9331                                                             &pipe_config->fdi_m_n);
9332                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
9333
9334                 /*
9335                  * FDI already provided one idea for the dotclock.
9336                  * Yell if the encoder disagrees.
9337                  */
9338                 drm_WARN(&dev_priv->drm,
9339                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
9340                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9341                          fdi_dotclock, dotclock);
9342         }
9343 }
9344
/*
 * Compare the committed SKL+ watermark and DDB allocation sw state for
 * @crtc against what is actually programmed in the hardware, reporting
 * any mismatch with drm_err().
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* hw readout snapshot; heap-allocated since the arrays are large */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_pipe_wm wm;
        } *hw;
        struct skl_pipe_wm *sw_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        u8 hw_enabled_slices;
        const enum pipe pipe = crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        /* SKL-style watermarks exist on gen9+ only; inactive pipes are skipped */
        if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
        sw_wm = &new_crtc_state->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

        /* DBUF slice tracking only verified on gen11+ */
        if (INTEL_GEN(dev_priv) >= 11 &&
            hw_enabled_slices != dev_priv->dbuf.enabled_slices)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
                        dev_priv->dbuf.enabled_slices,
                        hw_enabled_slices);

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        /* level 0 may legitimately hold the SAGV wm0 instead */
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]) ||
                            (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                               &sw_plane_wm->sagv_wm0)))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1, level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1,
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe), plane + 1,
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated its ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
        if (1) {
                /*
                 * NOTE(review): despite the comment above, the cursor is
                 * checked unconditionally here (if (1)); the described
                 * "skip when inactive" logic is not implemented — confirm
                 * whether that is intentional.
                 */
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        /* level 0 may legitimately hold the SAGV wm0 instead */
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]) ||
                            (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                               &sw_plane_wm->sagv_wm0)))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe),
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe),
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
9492
9493 static void
9494 verify_connector_state(struct intel_atomic_state *state,
9495                        struct intel_crtc *crtc)
9496 {
9497         struct drm_connector *connector;
9498         struct drm_connector_state *new_conn_state;
9499         int i;
9500
9501         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
9502                 struct drm_encoder *encoder = connector->encoder;
9503                 struct intel_crtc_state *crtc_state = NULL;
9504
9505                 if (new_conn_state->crtc != &crtc->base)
9506                         continue;
9507
9508                 if (crtc)
9509                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
9510
9511                 intel_connector_verify_state(crtc_state, new_conn_state);
9512
9513                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
9514                      "connector's atomic encoder doesn't match legacy encoder\n");
9515         }
9516 }
9517
9518 static void
9519 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
9520 {
9521         struct intel_encoder *encoder;
9522         struct drm_connector *connector;
9523         struct drm_connector_state *old_conn_state, *new_conn_state;
9524         int i;
9525
9526         for_each_intel_encoder(&dev_priv->drm, encoder) {
9527                 bool enabled = false, found = false;
9528                 enum pipe pipe;
9529
9530                 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
9531                             encoder->base.base.id,
9532                             encoder->base.name);
9533
9534                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
9535                                                    new_conn_state, i) {
9536                         if (old_conn_state->best_encoder == &encoder->base)
9537                                 found = true;
9538
9539                         if (new_conn_state->best_encoder != &encoder->base)
9540                                 continue;
9541                         found = enabled = true;
9542
9543                         I915_STATE_WARN(new_conn_state->crtc !=
9544                                         encoder->base.crtc,
9545                              "connector's crtc doesn't match encoder crtc\n");
9546                 }
9547
9548                 if (!found)
9549                         continue;
9550
9551                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
9552                      "encoder's enabled state mismatch "
9553                      "(expected %i, found %i)\n",
9554                      !!encoder->base.crtc, enabled);
9555
9556                 if (!encoder->base.crtc) {
9557                         bool active;
9558
9559                         active = encoder->get_hw_state(encoder, &pipe);
9560                         I915_STATE_WARN(active,
9561                              "encoder detached but still enabled on pipe %c.\n",
9562                              pipe_name(pipe));
9563                 }
9564         }
9565 }
9566
/*
 * Read the crtc's state back from the hardware and compare it against
 * the just-committed sw state, warning about any disagreement.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct intel_crtc *master = crtc;

        /*
         * Reuse the old crtc state's storage for the hw readout: tear
         * down the stale sw state and reset it, but preserve the
         * uapi.state backpointer across the reset.
         */
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
        intel_crtc_state_reset(old_crtc_state, crtc);
        old_crtc_state->uapi.state = state;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);

        pipe_config->hw.enable = new_crtc_state->hw.enable;

        intel_crtc_get_pipe_config(pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv) && pipe_config->hw.active)
                pipe_config->hw.active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, pipe_config->hw.active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        /* for a bigjoiner slave, the encoders hang off the master crtc */
        if (new_crtc_state->bigjoiner_slave)
                master = new_crtc_state->bigjoiner_linked_crtc;

        for_each_encoder_on_crtc(dev, &master->base, encoder) {
                enum pipe pipe;
                bool active;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && master->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                if (active)
                        intel_encoder_get_config(encoder, pipe_config);
        }

        /* inactive crtcs have nothing further to compare */
        if (!new_crtc_state->hw.active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
9638
9639 static void
9640 intel_verify_planes(struct intel_atomic_state *state)
9641 {
9642         struct intel_plane *plane;
9643         const struct intel_plane_state *plane_state;
9644         int i;
9645
9646         for_each_new_intel_plane_in_state(state, plane,
9647                                           plane_state, i)
9648                 assert_plane(plane, plane_state->planar_slave ||
9649                              plane_state->uapi.visible);
9650 }
9651
/*
 * Cross-check the software tracking of a shared DPLL against the state
 * read back from hardware. When @crtc/@new_crtc_state are given, also
 * verify that crtc's membership in the PLL's active/enabled masks;
 * with @crtc == NULL only the refcount bookkeeping is checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	/* Read back the current hardware on/off state of this PLL. */
	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs can't be compared against the sw on/off tracking. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* No crtc given: only sanity check the mask bookkeeping. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* The crtc's bit must be in active_mask iff the crtc is active. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	/* Active or not, the crtc must hold a reference on the PLL. */
	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Finally compare the tracked hw state against what was read back. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
9706
9707 static void
9708 verify_shared_dpll_state(struct intel_crtc *crtc,
9709                          struct intel_crtc_state *old_crtc_state,
9710                          struct intel_crtc_state *new_crtc_state)
9711 {
9712         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9713
9714         if (new_crtc_state->shared_dpll)
9715                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
9716
9717         if (old_crtc_state->shared_dpll &&
9718             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
9719                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
9720                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
9721
9722                 I915_STATE_WARN(pll->active_mask & crtc_mask,
9723                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
9724                                 pipe_name(crtc->pipe));
9725                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
9726                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
9727                                 pipe_name(crtc->pipe));
9728         }
9729 }
9730
9731 static void
9732 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9733                           struct intel_atomic_state *state,
9734                           struct intel_crtc_state *old_crtc_state,
9735                           struct intel_crtc_state *new_crtc_state)
9736 {
9737         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9738                 return;
9739
9740         verify_wm_state(crtc, new_crtc_state);
9741         verify_connector_state(state, crtc);
9742         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9743         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9744 }
9745
9746 static void
9747 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9748 {
9749         int i;
9750
9751         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9752                 verify_single_dpll_state(dev_priv,
9753                                          &dev_priv->dpll.shared_dplls[i],
9754                                          NULL, NULL);
9755 }
9756
/*
 * Verify the software state that is NOT tied to any active crtc after
 * a modeset: encoders, connectors without a crtc, and unused DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
9765
/*
 * Update the timing-derived vblank bookkeeping for an active crtc:
 * the vblank timestamping constants, crtc->mode_flags, and the
 * scanline counter offset used to translate the hw scanline counter
 * into a logical scanline.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	/*
	 * With VRR the vertical timings come from the vmin/vmax
	 * configuration instead of the fixed mode.
	 */
	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
9827
9828 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9829 {
9830         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9831         struct intel_crtc_state *new_crtc_state;
9832         struct intel_crtc *crtc;
9833         int i;
9834
9835         if (!dev_priv->display.crtc_compute_clock)
9836                 return;
9837
9838         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9839                 if (!intel_crtc_needs_modeset(new_crtc_state))
9840                         continue;
9841
9842                 intel_release_shared_dplls(state, crtc);
9843         }
9844 }
9845
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtcs that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* remember the first two crtcs being enabled */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		/* only count crtcs that stay enabled without a modeset */
		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one crtc stays enabled: the first newly enabled crtc
	 * waits on it. Otherwise the second newly enabled crtc (if any)
	 * waits on the first newly enabled one.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
9906
9907 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9908                            u8 active_pipes)
9909 {
9910         const struct intel_crtc_state *crtc_state;
9911         struct intel_crtc *crtc;
9912         int i;
9913
9914         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9915                 if (crtc_state->hw.active)
9916                         active_pipes |= BIT(crtc->pipe);
9917                 else
9918                         active_pipes &= ~BIT(crtc->pipe);
9919         }
9920
9921         return active_pipes;
9922 }
9923
9924 static int intel_modeset_checks(struct intel_atomic_state *state)
9925 {
9926         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9927
9928         state->modeset = true;
9929
9930         if (IS_HASWELL(dev_priv))
9931                 return hsw_mode_set_planes_workaround(state);
9932
9933         return 0;
9934 }
9935
9936 /*
9937  * Handle calculation of various watermark data at the end of the atomic check
9938  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9939  * handlers to ensure that all derived state has been updated.
9940  */
9941 static int calc_watermark_data(struct intel_atomic_state *state)
9942 {
9943         struct drm_device *dev = state->base.dev;
9944         struct drm_i915_private *dev_priv = to_i915(dev);
9945
9946         /* Is there platform-specific watermark information to calculate? */
9947         if (dev_priv->display.compute_global_watermarks)
9948                 return dev_priv->display.compute_global_watermarks(state);
9949
9950         return 0;
9951 }
9952
9953 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9954                                      struct intel_crtc_state *new_crtc_state)
9955 {
9956         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9957                 return;
9958
9959         new_crtc_state->uapi.mode_changed = false;
9960         new_crtc_state->update_pipe = true;
9961 }
9962
9963 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9964                                     struct intel_crtc_state *new_crtc_state)
9965 {
9966         /*
9967          * If we're not doing the full modeset we want to
9968          * keep the current M/N values as they may be
9969          * sufficiently different to the computed values
9970          * to cause problems.
9971          *
9972          * FIXME: should really copy more fuzzy state here
9973          */
9974         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9975         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9976         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9977         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9978 }
9979
9980 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9981                                           struct intel_crtc *crtc,
9982                                           u8 plane_ids_mask)
9983 {
9984         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9985         struct intel_plane *plane;
9986
9987         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9988                 struct intel_plane_state *plane_state;
9989
9990                 if ((plane_ids_mask & BIT(plane->id)) == 0)
9991                         continue;
9992
9993                 plane_state = intel_atomic_get_plane_state(state, plane);
9994                 if (IS_ERR(plane_state))
9995                         return PTR_ERR(plane_state);
9996         }
9997
9998         return 0;
9999 }
10000
10001 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
10002                                      struct intel_crtc *crtc)
10003 {
10004         const struct intel_crtc_state *old_crtc_state =
10005                 intel_atomic_get_old_crtc_state(state, crtc);
10006         const struct intel_crtc_state *new_crtc_state =
10007                 intel_atomic_get_new_crtc_state(state, crtc);
10008
10009         return intel_crtc_add_planes_to_state(state, crtc,
10010                                               old_crtc_state->enabled_planes |
10011                                               new_crtc_state->enabled_planes);
10012 }
10013
10014 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
10015 {
10016         /* See {hsw,vlv,ivb}_plane_ratio() */
10017         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
10018                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10019                 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
10020 }
10021
10022 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
10023                                            struct intel_crtc *crtc,
10024                                            struct intel_crtc *other)
10025 {
10026         const struct intel_plane_state *plane_state;
10027         struct intel_plane *plane;
10028         u8 plane_ids = 0;
10029         int i;
10030
10031         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10032                 if (plane->pipe == crtc->pipe)
10033                         plane_ids |= BIT(plane->id);
10034         }
10035
10036         return intel_crtc_add_planes_to_state(state, other, plane_ids);
10037 }
10038
10039 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
10040 {
10041         const struct intel_crtc_state *crtc_state;
10042         struct intel_crtc *crtc;
10043         int i;
10044
10045         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10046                 int ret;
10047
10048                 if (!crtc_state->bigjoiner)
10049                         continue;
10050
10051                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
10052                                                       crtc_state->bigjoiner_linked_crtc);
10053                 if (ret)
10054                         return ret;
10055         }
10056
10057         return 0;
10058 }
10059
10060 static int intel_atomic_check_planes(struct intel_atomic_state *state)
10061 {
10062         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10063         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10064         struct intel_plane_state *plane_state;
10065         struct intel_plane *plane;
10066         struct intel_crtc *crtc;
10067         int i, ret;
10068
10069         ret = icl_add_linked_planes(state);
10070         if (ret)
10071                 return ret;
10072
10073         ret = intel_bigjoiner_add_affected_planes(state);
10074         if (ret)
10075                 return ret;
10076
10077         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10078                 ret = intel_plane_atomic_check(state, plane);
10079                 if (ret) {
10080                         drm_dbg_atomic(&dev_priv->drm,
10081                                        "[PLANE:%d:%s] atomic driver check failed\n",
10082                                        plane->base.base.id, plane->base.name);
10083                         return ret;
10084                 }
10085         }
10086
10087         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10088                                             new_crtc_state, i) {
10089                 u8 old_active_planes, new_active_planes;
10090
10091                 ret = icl_check_nv12_planes(new_crtc_state);
10092                 if (ret)
10093                         return ret;
10094
10095                 /*
10096                  * On some platforms the number of active planes affects
10097                  * the planes' minimum cdclk calculation. Add such planes
10098                  * to the state before we compute the minimum cdclk.
10099                  */
10100                 if (!active_planes_affects_min_cdclk(dev_priv))
10101                         continue;
10102
10103                 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
10104                 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
10105
10106                 /*
10107                  * Not only the number of planes, but if the plane configuration had
10108                  * changed might already mean we need to recompute min CDCLK,
10109                  * because different planes might consume different amount of Dbuf bandwidth
10110                  * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
10111                  */
10112                 if (old_active_planes == new_active_planes)
10113                         continue;
10114
10115                 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
10116                 if (ret)
10117                         return ret;
10118         }
10119
10120         return 0;
10121 }
10122
/*
 * Decide whether a full cdclk recomputation is required for this
 * commit, based on per-plane minimum cdclk, a changed force_min_cdclk,
 * and the bandwidth-derived minimum. Sets *need_cdclk_calc; returns 0
 * or a negative error code.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* A changed force_min_cdclk always requires a recomputation. */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Nothing further to compare without both cdclk and bw state. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	/*
	 * min_cdclk accumulates the max of the per-pipe minimums; the
	 * bandwidth-derived minimum is compared against that running max.
	 */
	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
10175
10176 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
10177 {
10178         struct intel_crtc_state *crtc_state;
10179         struct intel_crtc *crtc;
10180         int i;
10181
10182         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10183                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
10184                 int ret;
10185
10186                 ret = intel_crtc_atomic_check(state, crtc);
10187                 if (ret) {
10188                         drm_dbg_atomic(&i915->drm,
10189                                        "[CRTC:%d:%s] atomic driver check failed\n",
10190                                        crtc->base.base.id, crtc->base.name);
10191                         return ret;
10192                 }
10193         }
10194
10195         return 0;
10196 }
10197
10198 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
10199                                                u8 transcoders)
10200 {
10201         const struct intel_crtc_state *new_crtc_state;
10202         struct intel_crtc *crtc;
10203         int i;
10204
10205         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10206                 if (new_crtc_state->hw.enable &&
10207                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
10208                     intel_crtc_needs_modeset(new_crtc_state))
10209                         return true;
10210         }
10211
10212         return false;
10213 }
10214
/*
 * Validate the bigjoiner configuration of @crtc: when @crtc becomes a
 * bigjoiner master, claim the adjacent pipe as its slave; reject the
 * state if the required slave crtc is missing or already in use.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/* slave being enabled: is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* The slave is always the pipe right after the master's. */
	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	slave = new_crtc_state->bigjoiner_linked_crtc =
		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}
10266
10267 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
10268                                  struct intel_crtc_state *master_crtc_state)
10269 {
10270         struct intel_crtc_state *slave_crtc_state =
10271                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
10272
10273         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
10274         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
10275         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
10276         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
10277 }
10278
10279 /**
10280  * DOC: asynchronous flip implementation
10281  *
10282  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
10283  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
10284  * Correspondingly, support is currently added for primary plane only.
10285  *
10286  * Async flip can only change the plane surface address, so anything else
10287  * changing is rejected from the intel_atomic_check_async() function.
10288  * Once this check is cleared, flip done interrupt is enabled using
10289  * the intel_crtc_enable_flip_done() function.
10290  *
10291  * As soon as the surface address register is written, flip done interrupt is
10292  * generated and the requested events are sent to userspace in the interrupt
10293  * handler itself. The timestamp and sequence sent during the flip done event
10294  * correspond to the last vblank and have no relation to the actual time when
10295  * the flip done event was sent.
10296  */
/*
 * Validate @state for an async flip: only the plane surface address may
 * change. Any modeset, plane-set change, or change to stride, modifier,
 * format, rotation, geometry, alpha, blend mode, color encoding or
 * color range is rejected with -EINVAL.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/* Per-crtc checks: crtc must stay active with the same plane set. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	/* Per-plane checks: only the surface address may differ. */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 and gen10 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->color_plane[0].stride !=
		    new_plane_state->color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
10411
/*
 * Pull the crtc state of every bigjoiner-linked crtc into @state, and make
 * sure that a modeset on a bigjoiner crtc also turns into a modeset (with
 * affected connectors/planes added) on its linked partner.
 *
 * Returns 0 on success, or a negative error code (this may be -EDEADLK
 * from crtc lock acquisition, in which case the atomic check is restarted
 * by the caller framework).
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/*
		 * Acquire (and thereby lock) the linked crtc's state even
		 * when this crtc does not need a modeset; the needs_modeset
		 * check is deliberately done only afterwards.
		 */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* Propagate the modeset to the linked crtc. */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
10455
10456 /**
10457  * intel_atomic_check - validate state object
10458  * @dev: drm device
10459  * @_state: state to validate
10460  */
10461 static int intel_atomic_check(struct drm_device *dev,
10462                               struct drm_atomic_state *_state)
10463 {
10464         struct drm_i915_private *dev_priv = to_i915(dev);
10465         struct intel_atomic_state *state = to_intel_atomic_state(_state);
10466         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10467         struct intel_crtc *crtc;
10468         int ret, i;
10469         bool any_ms = false;
10470
10471         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10472                                             new_crtc_state, i) {
10473                 if (new_crtc_state->inherited != old_crtc_state->inherited)
10474                         new_crtc_state->uapi.mode_changed = true;
10475         }
10476
10477         intel_vrr_check_modeset(state);
10478
10479         ret = drm_atomic_helper_check_modeset(dev, &state->base);
10480         if (ret)
10481                 goto fail;
10482
10483         ret = intel_bigjoiner_add_affected_crtcs(state);
10484         if (ret)
10485                 goto fail;
10486
10487         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10488                                             new_crtc_state, i) {
10489                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
10490                         /* Light copy */
10491                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
10492
10493                         continue;
10494                 }
10495
10496                 if (!new_crtc_state->uapi.enable) {
10497                         if (!new_crtc_state->bigjoiner_slave) {
10498                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
10499                                 any_ms = true;
10500                         }
10501                         continue;
10502                 }
10503
10504                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
10505                 if (ret)
10506                         goto fail;
10507
10508                 ret = intel_modeset_pipe_config(state, new_crtc_state);
10509                 if (ret)
10510                         goto fail;
10511
10512                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
10513                                                    new_crtc_state);
10514                 if (ret)
10515                         goto fail;
10516         }
10517
10518         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10519                                             new_crtc_state, i) {
10520                 if (!intel_crtc_needs_modeset(new_crtc_state))
10521                         continue;
10522
10523                 ret = intel_modeset_pipe_config_late(new_crtc_state);
10524                 if (ret)
10525                         goto fail;
10526
10527                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
10528         }
10529
10530         /**
10531          * Check if fastset is allowed by external dependencies like other
10532          * pipes and transcoders.
10533          *
10534          * Right now it only forces a fullmodeset when the MST master
10535          * transcoder did not changed but the pipe of the master transcoder
10536          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
10537          * in case of port synced crtcs, if one of the synced crtcs
10538          * needs a full modeset, all other synced crtcs should be
10539          * forced a full modeset.
10540          */
10541         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10542                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
10543                         continue;
10544
10545                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
10546                         enum transcoder master = new_crtc_state->mst_master_transcoder;
10547
10548                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
10549                                 new_crtc_state->uapi.mode_changed = true;
10550                                 new_crtc_state->update_pipe = false;
10551                         }
10552                 }
10553
10554                 if (is_trans_port_sync_mode(new_crtc_state)) {
10555                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
10556
10557                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
10558                                 trans |= BIT(new_crtc_state->master_transcoder);
10559
10560                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
10561                                 new_crtc_state->uapi.mode_changed = true;
10562                                 new_crtc_state->update_pipe = false;
10563                         }
10564                 }
10565
10566                 if (new_crtc_state->bigjoiner) {
10567                         struct intel_crtc_state *linked_crtc_state =
10568                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
10569
10570                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
10571                                 new_crtc_state->uapi.mode_changed = true;
10572                                 new_crtc_state->update_pipe = false;
10573                         }
10574                 }
10575         }
10576
10577         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10578                                             new_crtc_state, i) {
10579                 if (intel_crtc_needs_modeset(new_crtc_state)) {
10580                         any_ms = true;
10581                         continue;
10582                 }
10583
10584                 if (!new_crtc_state->update_pipe)
10585                         continue;
10586
10587                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
10588         }
10589
10590         if (any_ms && !check_digital_port_conflicts(state)) {
10591                 drm_dbg_kms(&dev_priv->drm,
10592                             "rejecting conflicting digital port configuration\n");
10593                 ret = -EINVAL;
10594                 goto fail;
10595         }
10596
10597         ret = drm_dp_mst_atomic_check(&state->base);
10598         if (ret)
10599                 goto fail;
10600
10601         ret = intel_atomic_check_planes(state);
10602         if (ret)
10603                 goto fail;
10604
10605         intel_fbc_choose_crtc(dev_priv, state);
10606         ret = calc_watermark_data(state);
10607         if (ret)
10608                 goto fail;
10609
10610         ret = intel_bw_atomic_check(state);
10611         if (ret)
10612                 goto fail;
10613
10614         ret = intel_atomic_check_cdclk(state, &any_ms);
10615         if (ret)
10616                 goto fail;
10617
10618         if (any_ms) {
10619                 ret = intel_modeset_checks(state);
10620                 if (ret)
10621                         goto fail;
10622
10623                 ret = intel_modeset_calc_cdclk(state);
10624                 if (ret)
10625                         return ret;
10626
10627                 intel_modeset_clear_plls(state);
10628         }
10629
10630         ret = intel_atomic_check_crtcs(state);
10631         if (ret)
10632                 goto fail;
10633
10634         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10635                                             new_crtc_state, i) {
10636                 if (new_crtc_state->uapi.async_flip) {
10637                         ret = intel_atomic_check_async(state);
10638                         if (ret)
10639                                 goto fail;
10640                 }
10641
10642                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
10643                     !new_crtc_state->update_pipe)
10644                         continue;
10645
10646                 intel_dump_pipe_config(new_crtc_state, state,
10647                                        intel_crtc_needs_modeset(new_crtc_state) ?
10648                                        "[modeset]" : "[fastset]");
10649         }
10650
10651         return 0;
10652
10653  fail:
10654         if (ret == -EDEADLK)
10655                 return ret;
10656
10657         /*
10658          * FIXME would probably be nice to know which crtc specifically
10659          * caused the failure, in cases where we can pinpoint it.
10660          */
10661         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10662                                             new_crtc_state, i)
10663                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
10664
10665         return ret;
10666 }
10667
10668 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10669 {
10670         struct intel_crtc_state *crtc_state;
10671         struct intel_crtc *crtc;
10672         int i, ret;
10673
10674         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10675         if (ret < 0)
10676                 return ret;
10677
10678         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10679                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10680
10681                 if (mode_changed || crtc_state->update_pipe ||
10682                     crtc_state->uapi.color_mgmt_changed) {
10683                         intel_dsb_prepare(crtc_state);
10684                 }
10685         }
10686
10687         return 0;
10688 }
10689
10690 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10691                                   struct intel_crtc_state *crtc_state)
10692 {
10693         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10694
10695         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
10696                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10697
10698         if (crtc_state->has_pch_encoder) {
10699                 enum pipe pch_transcoder =
10700                         intel_crtc_pch_transcoder(crtc);
10701
10702                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10703         }
10704 }
10705
/*
 * Apply the pipe-level state that is allowed to change during a fastset
 * (i.e. without a full modeset): pipe source size, panel fitter,
 * linetime watermark and (gen11+) pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ilk-style pfit: enable, or disable if it was on before. */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
10750
/*
 * Commit the pipe configuration for one crtc inside the vblank-evasion
 * window. For full modesets the pipe was already programmed when the crtc
 * was enabled, so only the fastset/color-update parts run here; watermarks
 * are updated in all cases.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		/* gen9+: drop any scalers no longer in use. */
		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
10785
/*
 * Enable one crtc as part of a full modeset. Does nothing for crtcs that
 * are only being fastset/flipped.
 */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	intel_crtc_update_active_timings(new_crtc_state);

	dev_priv->display.crtc_enable(state, crtc);

	/* Pipe CRC is not re-enabled for a bigjoiner slave pipe. */
	if (new_crtc_state->bigjoiner_slave)
		return;

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}
10806
/*
 * Commit the plane and pipe updates for one crtc, performing vblank
 * evasion around the actual register writes so they all land in the
 * same frame.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* Fastset-only preparation; modesets did this during crtc enable. */
	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
10856
/*
 * Disable a crtc that needs a modeset: planes first (including a bigjoiner
 * partner's planes), then pipe CRC, then the crtc itself, FBC and its
 * shared DPLL. Must never be called for a bigjoiner slave state directly.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
10896
/*
 * Disable every crtc that needs a modeset, in two passes: port sync and
 * MST slave transcoders first (they must go down before their masters),
 * then everything else that is still enabled.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		/* Remember this pipe so the second pass skips it. */
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		/* The bigjoiner slave gets its pre-plane update via the master. */
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
10949
10950 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10951 {
10952         struct intel_crtc_state *new_crtc_state;
10953         struct intel_crtc *crtc;
10954         int i;
10955
10956         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10957                 if (!new_crtc_state->hw.active)
10958                         continue;
10959
10960                 intel_enable_crtc(state, crtc);
10961                 intel_update_crtc(state, crtc);
10962         }
10963 }
10964
/*
 * skl+ variant of the modeset enable sequence. Crtc updates must be
 * ordered such that the DDB (data buffer) allocations of any two pipes
 * never overlap at any instant, otherwise pipes can underrun; the
 * @entries array tracks the allocation each pipe currently occupies.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Classify active pipes: fastset/update vs full modeset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/*
			 * Defer this pipe while its new allocation still
			 * overlaps someone else's current allocation;
			 * the outer loop retries until all pipes drain.
			 */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Dependent pipes are enabled in the next loop. */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* By now every pipe must have been handled. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
11086
/* Drop the reference on every atomic state queued on the deferred free list. */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	/* Atomically detach the whole lock-less list, then walk it. */
	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}
11096
/* Work item wrapper so the deferred state freeing runs in process context. */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}
11104
/*
 * Wait for the commit's i915_sw_fence to signal, while simultaneously
 * watching for a pending modeset GPU reset (I915_RESET_MODESET).  Either
 * event terminates the wait, so a reset cannot deadlock behind a commit
 * whose fence will only signal after the reset completes.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue ourselves on both waitqueues before re-checking. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		/* Done when the fence signaled or a modeset reset is pending. */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
11131
11132 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
11133 {
11134         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
11135         struct intel_crtc *crtc;
11136         int i;
11137
11138         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11139                                             new_crtc_state, i)
11140                 intel_dsb_cleanup(old_crtc_state);
11141 }
11142
11143 static void intel_atomic_cleanup_work(struct work_struct *work)
11144 {
11145         struct intel_atomic_state *state =
11146                 container_of(work, struct intel_atomic_state, base.commit_work);
11147         struct drm_i915_private *i915 = to_i915(state->base.dev);
11148
11149         intel_cleanup_dsbs(state);
11150         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
11151         drm_atomic_helper_commit_cleanup_done(&state->base);
11152         drm_atomic_state_put(&state->base);
11153
11154         intel_atomic_helper_free_state(i915);
11155 }
11156
/*
 * Cache the fast clear color for every plane whose fb uses the GEN12
 * render-compression clear-color modifier: read the HW-native color value
 * out of the fb's clear color plane into plane_state->ccval, so the commit
 * can program it without touching the GEM object again.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int ret;

		/* Only this modifier carries a clear color plane (plane #2). */
		if (!fb ||
		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4x4-byte per-channel values; read the native color. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[2] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
11193
/*
 * The tail of an atomic commit: performs the actual hardware (de)programming
 * for the committed state.  Runs either inline from intel_atomic_commit()
 * (blocking commits) or from intel_atomic_commit_work() (nonblocking ones).
 * The ordering here is strict: disables, then enables, then post-vblank
 * updates; cleanup is handed off to intel_atomic_cleanup_work().
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Wait for plane/fence dependencies, or bail out early on GPU reset. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	/* Hold the modeset power domain across the whole modeset sequence. */
	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Cache plane fast clear colors before we start programming. */
	intel_atomic_prepare_plane_clear_colors(state);

	/* Grab the power domains each modified CRTC needs; put back at the end. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	/* Arm flip-done handling for pipes doing an async flip. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		/*
		 * Reload LUTs post-vblank when color management changed
		 * without a full modeset and the LUTs were not preloaded.
		 */
		if (new_crtc_state->hw.active &&
		    !intel_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		/* Release the domains grabbed at the top of this function. */
		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	/* Drop the runtime pm reference taken in intel_atomic_commit(). */
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
11371
11372 static void intel_atomic_commit_work(struct work_struct *work)
11373 {
11374         struct intel_atomic_state *state =
11375                 container_of(work, struct intel_atomic_state, base.commit_work);
11376
11377         intel_atomic_commit_tail(state);
11378 }
11379
11380 static int __i915_sw_fence_call
11381 intel_atomic_commit_ready(struct i915_sw_fence *fence,
11382                           enum i915_sw_fence_notify notify)
11383 {
11384         struct intel_atomic_state *state =
11385                 container_of(fence, struct intel_atomic_state, commit_ready);
11386
11387         switch (notify) {
11388         case FENCE_COMPLETE:
11389                 /* we do blocking waits in the worker, nothing to do here */
11390                 break;
11391         case FENCE_FREE:
11392                 {
11393                         struct intel_atomic_helper *helper =
11394                                 &to_i915(state->base.dev)->atomic_helper;
11395
11396                         if (llist_add(&state->freed, &helper->free_list))
11397                                 schedule_work(&helper->free_work);
11398                         break;
11399                 }
11400         }
11401
11402         return NOTIFY_DONE;
11403 }
11404
11405 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
11406 {
11407         struct intel_plane_state *old_plane_state, *new_plane_state;
11408         struct intel_plane *plane;
11409         int i;
11410
11411         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
11412                                              new_plane_state, i)
11413                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
11414                                         to_intel_frontbuffer(new_plane_state->hw.fb),
11415                                         plane->frontbuffer_bit);
11416 }
11417
/*
 * Atomic commit entry point: prepares the state, swaps it in and either
 * runs the commit tail inline (blocking) or queues it on a workqueue
 * (nonblocking).  On failure everything acquired here is unwound before
 * returning.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Released by commit_tail, or by the error paths below. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Signal the fence so waiters (and its cleanup) can proceed. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		/* Unwind: signal the fence, free DSBs/planes, drop runtime pm. */
		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Reference for the commit tail; dropped in intel_atomic_cleanup_work(). */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Flush queued nonblocking modesets before running the tail inline. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
11508
/* Tracks a one-shot vblank waiter that boosts RPS; see do_rps_boost(). */
struct wait_rps_boost {
	/* Embedded vblank waitqueue entry; do_rps_boost() is its callback. */
	struct wait_queue_entry wait;

	/* CRTC whose vblank reference is held while waiting. */
	struct drm_crtc *crtc;
	/* Request expected to complete before the next vblank. */
	struct i915_request *request;
};
11515
11516 static int do_rps_boost(struct wait_queue_entry *_wait,
11517                         unsigned mode, int sync, void *key)
11518 {
11519         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
11520         struct i915_request *rq = wait->request;
11521
11522         /*
11523          * If we missed the vblank, but the request is already running it
11524          * is reasonable to assume that it will complete before the next
11525          * vblank without our intervention, so leave RPS alone.
11526          */
11527         if (!i915_request_started(rq))
11528                 intel_rps_boost(rq);
11529         i915_request_put(rq);
11530
11531         drm_crtc_vblank_put(wait->crtc);
11532
11533         list_del(&wait->wait.entry);
11534         kfree(wait);
11535         return 1;
11536 }
11537
11538 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
11539                                        struct dma_fence *fence)
11540 {
11541         struct wait_rps_boost *wait;
11542
11543         if (!dma_fence_is_i915(fence))
11544                 return;
11545
11546         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
11547                 return;
11548
11549         if (drm_crtc_vblank_get(crtc))
11550                 return;
11551
11552         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
11553         if (!wait) {
11554                 drm_crtc_vblank_put(crtc);
11555                 return;
11556         }
11557
11558         wait->request = to_request(dma_fence_get(fence));
11559         wait->crtc = crtc;
11560
11561         wait->wait.func = do_rps_boost;
11562         wait->wait.flags = 0;
11563
11564         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
11565 }
11566
11567 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
11568 {
11569         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11570         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11571         struct drm_framebuffer *fb = plane_state->hw.fb;
11572         struct i915_vma *vma;
11573
11574         if (plane->id == PLANE_CURSOR &&
11575             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
11576                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11577                 const int align = intel_cursor_alignment(dev_priv);
11578                 int err;
11579
11580                 err = i915_gem_object_attach_phys(obj, align);
11581                 if (err)
11582                         return err;
11583         }
11584
11585         vma = intel_pin_and_fence_fb_obj(fb,
11586                                          &plane_state->view,
11587                                          intel_plane_uses_fence(plane_state),
11588                                          &plane_state->flags);
11589         if (IS_ERR(vma))
11590                 return PTR_ERR(vma);
11591
11592         plane_state->vma = vma;
11593
11594         return 0;
11595 }
11596
11597 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
11598 {
11599         struct i915_vma *vma;
11600
11601         vma = fetch_and_zero(&old_plane_state->vma);
11602         if (vma)
11603                 intel_unpin_fb_vma(vma, old_plane_state->flags);
11604 }
11605
11606 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
11607 {
11608         struct i915_sched_attr attr = {
11609                 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
11610         };
11611
11612         i915_gem_object_wait_priority(obj, 0, &attr);
11613 }
11614
11615 /**
11616  * intel_prepare_plane_fb - Prepare fb for usage on plane
11617  * @_plane: drm plane to prepare for
11618  * @_new_plane_state: the plane state being prepared
11619  *
11620  * Prepares a framebuffer for usage on a display plane.  Generally this
11621  * involves pinning the underlying object and updating the frontbuffer tracking
11622  * bits.  Some older platforms need special physical address handling for
11623  * cursor planes.
11624  *
11625  * Returns 0 on success, negative error code on failure.
11626  */
11627 int
11628 intel_prepare_plane_fb(struct drm_plane *_plane,
11629                        struct drm_plane_state *_new_plane_state)
11630 {
11631         struct intel_plane *plane = to_intel_plane(_plane);
11632         struct intel_plane_state *new_plane_state =
11633                 to_intel_plane_state(_new_plane_state);
11634         struct intel_atomic_state *state =
11635                 to_intel_atomic_state(new_plane_state->uapi.state);
11636         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11637         const struct intel_plane_state *old_plane_state =
11638                 intel_atomic_get_old_plane_state(state, plane);
11639         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
11640         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
11641         int ret;
11642
11643         if (old_obj) {
11644                 const struct intel_crtc_state *crtc_state =
11645                         intel_atomic_get_new_crtc_state(state,
11646                                                         to_intel_crtc(old_plane_state->hw.crtc));
11647
11648                 /* Big Hammer, we also need to ensure that any pending
11649                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
11650                  * current scanout is retired before unpinning the old
11651                  * framebuffer. Note that we rely on userspace rendering
11652                  * into the buffer attached to the pipe they are waiting
11653                  * on. If not, userspace generates a GPU hang with IPEHR
11654                  * point to the MI_WAIT_FOR_EVENT.
11655                  *
11656                  * This should only fail upon a hung GPU, in which case we
11657                  * can safely continue.
11658                  */
11659                 if (intel_crtc_needs_modeset(crtc_state)) {
11660                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
11661                                                               old_obj->base.resv, NULL,
11662                                                               false, 0,
11663                                                               GFP_KERNEL);
11664                         if (ret < 0)
11665                                 return ret;
11666                 }
11667         }
11668
11669         if (new_plane_state->uapi.fence) { /* explicit fencing */
11670                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11671                                                     new_plane_state->uapi.fence,
11672                                                     i915_fence_timeout(dev_priv),
11673                                                     GFP_KERNEL);
11674                 if (ret < 0)
11675                         return ret;
11676         }
11677
11678         if (!obj)
11679                 return 0;
11680
11681         ret = i915_gem_object_pin_pages(obj);
11682         if (ret)
11683                 return ret;
11684
11685         ret = intel_plane_pin_fb(new_plane_state);
11686
11687         i915_gem_object_unpin_pages(obj);
11688         if (ret)
11689                 return ret;
11690
11691         fb_obj_bump_render_priority(obj);
11692         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11693
11694         if (!new_plane_state->uapi.fence) { /* implicit fencing */
11695                 struct dma_fence *fence;
11696
11697                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
11698                                                       obj->base.resv, NULL,
11699                                                       false,
11700                                                       i915_fence_timeout(dev_priv),
11701                                                       GFP_KERNEL);
11702                 if (ret < 0)
11703                         goto unpin_fb;
11704
11705                 fence = dma_resv_get_excl_rcu(obj->base.resv);
11706                 if (fence) {
11707                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11708                                                    fence);
11709                         dma_fence_put(fence);
11710                 }
11711         } else {
11712                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11713                                            new_plane_state->uapi.fence);
11714         }
11715
11716         /*
11717          * We declare pageflips to be interactive and so merit a small bias
11718          * towards upclocking to deliver the frame on time. By only changing
11719          * the RPS thresholds to sample more regularly and aim for higher
11720          * clocks we can hopefully deliver low power workloads (like kodi)
11721          * that are not quite steady state without resorting to forcing
11722          * maximum clocks following a vblank miss (see do_rps_boost()).
11723          */
11724         if (!state->rps_interactive) {
11725                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11726                 state->rps_interactive = true;
11727         }
11728
11729         return 0;
11730
11731 unpin_fb:
11732         intel_plane_unpin_fb(new_plane_state);
11733
11734         return ret;
11735 }
11736
11737 /**
11738  * intel_cleanup_plane_fb - Cleans up an fb after plane use
11739  * @plane: drm plane to clean up for
11740  * @_old_plane_state: the state from the previous modeset
11741  *
11742  * Cleans up a framebuffer that has just been removed from a plane.
11743  */
11744 void
11745 intel_cleanup_plane_fb(struct drm_plane *plane,
11746                        struct drm_plane_state *_old_plane_state)
11747 {
11748         struct intel_plane_state *old_plane_state =
11749                 to_intel_plane_state(_old_plane_state);
11750         struct intel_atomic_state *state =
11751                 to_intel_atomic_state(old_plane_state->uapi.state);
11752         struct drm_i915_private *dev_priv = to_i915(plane->dev);
11753         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11754
11755         if (!obj)
11756                 return;
11757
11758         if (state->rps_interactive) {
11759                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11760                 state->rps_interactive = false;
11761         }
11762
11763         /* Should only be called after a successful intel_prepare_plane_fb()! */
11764         intel_plane_unpin_fb(old_plane_state);
11765 }
11766
11767 /**
11768  * intel_plane_destroy - destroy a plane
11769  * @plane: plane to destroy
11770  *
11771  * Common destruction function for all types of planes (primary, cursor,
11772  * sprite).
11773  */
11774 void intel_plane_destroy(struct drm_plane *plane)
11775 {
11776         drm_plane_cleanup(plane);
11777         kfree(to_intel_plane(plane));
11778 }
11779
11780 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11781 {
11782         struct intel_plane *plane;
11783
11784         for_each_intel_plane(&dev_priv->drm, plane) {
11785                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11786                                                                   plane->pipe);
11787
11788                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11789         }
11790 }
11791
11792
11793 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11794                                       struct drm_file *file)
11795 {
11796         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11797         struct drm_crtc *drmmode_crtc;
11798         struct intel_crtc *crtc;
11799
11800         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11801         if (!drmmode_crtc)
11802                 return -ENOENT;
11803
11804         crtc = to_intel_crtc(drmmode_crtc);
11805         pipe_from_crtc_id->pipe = crtc->pipe;
11806
11807         return 0;
11808 }
11809
11810 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11811 {
11812         struct drm_device *dev = encoder->base.dev;
11813         struct intel_encoder *source_encoder;
11814         u32 possible_clones = 0;
11815
11816         for_each_intel_encoder(dev, source_encoder) {
11817                 if (encoders_cloneable(encoder, source_encoder))
11818                         possible_clones |= drm_encoder_mask(&source_encoder->base);
11819         }
11820
11821         return possible_clones;
11822 }
11823
11824 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11825 {
11826         struct drm_device *dev = encoder->base.dev;
11827         struct intel_crtc *crtc;
11828         u32 possible_crtcs = 0;
11829
11830         for_each_intel_crtc(dev, crtc) {
11831                 if (encoder->pipe_mask & BIT(crtc->pipe))
11832                         possible_crtcs |= drm_crtc_mask(&crtc->base);
11833         }
11834
11835         return possible_crtcs;
11836 }
11837
11838 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11839 {
11840         if (!IS_MOBILE(dev_priv))
11841                 return false;
11842
11843         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11844                 return false;
11845
11846         if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11847                 return false;
11848
11849         return true;
11850 }
11851
11852 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11853 {
11854         if (INTEL_GEN(dev_priv) >= 9)
11855                 return false;
11856
11857         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11858                 return false;
11859
11860         if (HAS_PCH_LPT_H(dev_priv) &&
11861             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11862                 return false;
11863
11864         /* DDI E can't be used if DDI A requires 4 lanes */
11865         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11866                 return false;
11867
11868         if (!dev_priv->vbt.int_crt_support)
11869                 return false;
11870
11871         return true;
11872 }
11873
/*
 * Probe and register all display outputs for the platform
 * (DDI/DP/HDMI/SDVO/LVDS/CRT/DSI/TV/DVO), newest platforms first, using
 * strap registers and/or the VBT as each generation requires, and then
 * fill in every encoder's possible_crtcs/possible_clones masks.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected. Later SKUs may or
		 * may not have it - it was supposed to be fixed by the same
		 * time we stopped using straps. Assume it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/*
		 * NOTE(review): deliberately re-reads GEN3_SDVOB here, per the
		 * comment above — SDVOC has no detect register of its own
		 * before G4X.
		 */
		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* Now that all encoders exist, compute their CRTC/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
12124
12125 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
12126 {
12127         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
12128
12129         drm_framebuffer_cleanup(fb);
12130         intel_frontbuffer_put(intel_fb->frontbuffer);
12131
12132         kfree(intel_fb);
12133 }
12134
12135 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
12136                                                 struct drm_file *file,
12137                                                 unsigned int *handle)
12138 {
12139         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12140         struct drm_i915_private *i915 = to_i915(obj->base.dev);
12141
12142         if (obj->userptr.mm) {
12143                 drm_dbg(&i915->drm,
12144                         "attempting to use a userptr for a framebuffer, denied\n");
12145                 return -EINVAL;
12146         }
12147
12148         return drm_gem_handle_create(file, &obj->base, handle);
12149 }
12150
12151 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
12152                                         struct drm_file *file,
12153                                         unsigned flags, unsigned color,
12154                                         struct drm_clip_rect *clips,
12155                                         unsigned num_clips)
12156 {
12157         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12158
12159         i915_gem_object_flush_if_display(obj);
12160         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
12161
12162         return 0;
12163 }
12164
/* Framebuffer vfuncs installed on every userspace-created fb. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
12170
/*
 * Validate @mode_cmd against the hardware's format/modifier/stride/offset
 * constraints and initialize @intel_fb around @obj. Takes a frontbuffer
 * reference up front; any failure path drops it again via the err label.
 * Returns 0 on success or a negative error code.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fence tiling/stride under its lock. */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* No explicit modifier: derive one from the fence tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks: shared handle, stride alignment, CCS aux pitch. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
12316
12317 static struct drm_framebuffer *
12318 intel_user_framebuffer_create(struct drm_device *dev,
12319                               struct drm_file *filp,
12320                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
12321 {
12322         struct drm_framebuffer *fb;
12323         struct drm_i915_gem_object *obj;
12324         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
12325
12326         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
12327         if (!obj)
12328                 return ERR_PTR(-ENOENT);
12329
12330         fb = intel_framebuffer_create(obj, &mode_cmd);
12331         i915_gem_object_put(obj);
12332
12333         return fb;
12334 }
12335
/*
 * Reject modes whose flags or timings the display hardware can never
 * handle, with per-generation transcoder limits. Connector/encoder
 * specific restrictions (incl. DBLSCAN, see below) are applied elsewhere.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size and blanking requirements. */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
12426
12427 enum drm_mode_status
12428 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
12429                                 const struct drm_display_mode *mode,
12430                                 bool bigjoiner)
12431 {
12432         int plane_width_max, plane_height_max;
12433
12434         /*
12435          * intel_mode_valid() should be
12436          * sufficient on older platforms.
12437          */
12438         if (INTEL_GEN(dev_priv) < 9)
12439                 return MODE_OK;
12440
12441         /*
12442          * Most people will probably want a fullscreen
12443          * plane so let's not advertize modes that are
12444          * too big for that.
12445          */
12446         if (INTEL_GEN(dev_priv) >= 11) {
12447                 plane_width_max = 5120 << bigjoiner;
12448                 plane_height_max = 4320;
12449         } else {
12450                 plane_width_max = 5120;
12451                 plane_height_max = 4096;
12452         }
12453
12454         if (mode->hdisplay > plane_width_max)
12455                 return MODE_H_ILLEGAL;
12456
12457         if (mode->vdisplay > plane_height_max)
12458                 return MODE_V_ILLEGAL;
12459
12460         return MODE_OK;
12461 }
12462
/* Top-level mode_config vfuncs wiring i915 into the DRM atomic framework. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
12474
12475 /**
12476  * intel_init_display_hooks - initialize the display modesetting hooks
12477  * @dev_priv: device private
12478  */
12479 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
12480 {
12481         intel_init_cdclk_hooks(dev_priv);
12482
12483         intel_dpll_init_clock_hook(dev_priv);
12484
12485         if (INTEL_GEN(dev_priv) >= 9) {
12486                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12487                 dev_priv->display.crtc_enable = hsw_crtc_enable;
12488                 dev_priv->display.crtc_disable = hsw_crtc_disable;
12489         } else if (HAS_DDI(dev_priv)) {
12490                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12491                 dev_priv->display.crtc_enable = hsw_crtc_enable;
12492                 dev_priv->display.crtc_disable = hsw_crtc_disable;
12493         } else if (HAS_PCH_SPLIT(dev_priv)) {
12494                 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
12495                 dev_priv->display.crtc_enable = ilk_crtc_enable;
12496                 dev_priv->display.crtc_disable = ilk_crtc_disable;
12497         } else if (IS_CHERRYVIEW(dev_priv) ||
12498                    IS_VALLEYVIEW(dev_priv)) {
12499                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12500                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
12501                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12502         } else {
12503                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12504                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
12505                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12506         }
12507
12508         intel_fdi_init_hook(dev_priv);
12509
12510         if (INTEL_GEN(dev_priv) >= 9) {
12511                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
12512                 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
12513         } else {
12514                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
12515                 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
12516         }
12517
12518 }
12519
12520 void intel_modeset_init_hw(struct drm_i915_private *i915)
12521 {
12522         struct intel_cdclk_state *cdclk_state =
12523                 to_intel_cdclk_state(i915->cdclk.obj.state);
12524
12525         intel_update_cdclk(i915);
12526         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
12527         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
12528 }
12529
12530 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
12531 {
12532         struct drm_plane *plane;
12533         struct intel_crtc *crtc;
12534
12535         for_each_intel_crtc(state->dev, crtc) {
12536                 struct intel_crtc_state *crtc_state;
12537
12538                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
12539                 if (IS_ERR(crtc_state))
12540                         return PTR_ERR(crtc_state);
12541
12542                 if (crtc_state->hw.active) {
12543                         /*
12544                          * Preserve the inherited flag to avoid
12545                          * taking the full modeset path.
12546                          */
12547                         crtc_state->inherited = true;
12548                 }
12549         }
12550
12551         drm_for_each_plane(plane, state->dev) {
12552                 struct drm_plane_state *plane_state;
12553
12554                 plane_state = drm_atomic_get_plane_state(state, plane);
12555                 if (IS_ERR(plane_state))
12556                         return PTR_ERR(plane_state);
12557         }
12558
12559         return 0;
12560 }
12561
12562 /*
12563  * Calculate what we think the watermarks should be for the state we've read
12564  * out of the hardware and then immediately program those watermarks so that
12565  * we ensure the hardware settings match our internal state.
12566  *
12567  * We can calculate what we think WM's should be by creating a duplicate of the
12568  * current state (which was constructed during hardware readout) and running it
12569  * through the atomic check code to calculate new watermark values in the
12570  * state object.
12571  */
12572 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
12573 {
12574         struct drm_atomic_state *state;
12575         struct intel_atomic_state *intel_state;
12576         struct intel_crtc *crtc;
12577         struct intel_crtc_state *crtc_state;
12578         struct drm_modeset_acquire_ctx ctx;
12579         int ret;
12580         int i;
12581
12582         /* Only supported on platforms that use atomic watermark design */
12583         if (!dev_priv->display.optimize_watermarks)
12584                 return;
12585
12586         state = drm_atomic_state_alloc(&dev_priv->drm);
12587         if (drm_WARN_ON(&dev_priv->drm, !state))
12588                 return;
12589
12590         intel_state = to_intel_atomic_state(state);
12591
12592         drm_modeset_acquire_init(&ctx, 0);
12593
12594 retry:
12595         state->acquire_ctx = &ctx;
12596
12597         /*
12598          * Hardware readout is the only time we don't want to calculate
12599          * intermediate watermarks (since we don't trust the current
12600          * watermarks).
12601          */
12602         if (!HAS_GMCH(dev_priv))
12603                 intel_state->skip_intermediate_wm = true;
12604
12605         ret = sanitize_watermarks_add_affected(state);
12606         if (ret)
12607                 goto fail;
12608
12609         ret = intel_atomic_check(&dev_priv->drm, state);
12610         if (ret)
12611                 goto fail;
12612
12613         /* Write calculated watermark values back */
12614         for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
12615                 crtc_state->wm.need_postvbl_update = true;
12616                 dev_priv->display.optimize_watermarks(intel_state, crtc);
12617
12618                 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
12619         }
12620
12621 fail:
12622         if (ret == -EDEADLK) {
12623                 drm_atomic_state_clear(state);
12624                 drm_modeset_backoff(&ctx);
12625                 goto retry;
12626         }
12627
12628         /*
12629          * If we fail here, it means that the hardware appears to be
12630          * programmed in a way that shouldn't be possible, given our
12631          * understanding of watermark requirements.  This might mean a
12632          * mistake in the hardware readout code or a mistake in the
12633          * watermark calculations for a given platform.  Raise a WARN
12634          * so that this is noticeable.
12635          *
12636          * If this actually happens, we'll have to just leave the
12637          * BIOS-programmed watermarks untouched and hope for the best.
12638          */
12639         drm_WARN(&dev_priv->drm, ret,
12640                  "Could not determine valid watermarks for inherited state\n");
12641
12642         drm_atomic_state_put(state);
12643
12644         drm_modeset_drop_locks(&ctx);
12645         drm_modeset_acquire_fini(&ctx);
12646 }
12647
12648 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12649 {
12650         if (IS_GEN(dev_priv, 5)) {
12651                 u32 fdi_pll_clk =
12652                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12653
12654                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12655         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
12656                 dev_priv->fdi_pll_freq = 270000;
12657         } else {
12658                 return;
12659         }
12660
12661         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12662 }
12663
/*
 * Commit the state read out from hardware once at init time, so that
 * all active planes recompute their state before the first userspace
 * commit.  Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * If an encoder doesn't pass its initial fastset
			 * check, pull the crtc's connectors into the state
			 * as well so the commit can fix things up.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On modeset lock contention, back off and restart the whole thing. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
12742
12743 static void intel_mode_config_init(struct drm_i915_private *i915)
12744 {
12745         struct drm_mode_config *mode_config = &i915->drm.mode_config;
12746
12747         drm_mode_config_init(&i915->drm);
12748         INIT_LIST_HEAD(&i915->global_obj_list);
12749
12750         mode_config->min_width = 0;
12751         mode_config->min_height = 0;
12752
12753         mode_config->preferred_depth = 24;
12754         mode_config->prefer_shadow = 1;
12755
12756         mode_config->allow_fb_modifiers = true;
12757
12758         mode_config->funcs = &intel_mode_funcs;
12759
12760         mode_config->async_page_flip = has_async_flips(i915);
12761
12762         /*
12763          * Maximum framebuffer dimensions, chosen to match
12764          * the maximum render engine surface size on gen4+.
12765          */
12766         if (INTEL_GEN(i915) >= 7) {
12767                 mode_config->max_width = 16384;
12768                 mode_config->max_height = 16384;
12769         } else if (INTEL_GEN(i915) >= 4) {
12770                 mode_config->max_width = 8192;
12771                 mode_config->max_height = 8192;
12772         } else if (IS_GEN(i915, 3)) {
12773                 mode_config->max_width = 4096;
12774                 mode_config->max_height = 4096;
12775         } else {
12776                 mode_config->max_width = 2048;
12777                 mode_config->max_height = 2048;
12778         }
12779
12780         if (IS_I845G(i915) || IS_I865G(i915)) {
12781                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12782                 mode_config->cursor_height = 1023;
12783         } else if (IS_I830(i915) || IS_I85X(i915) ||
12784                    IS_I915G(i915) || IS_I915GM(i915)) {
12785                 mode_config->cursor_width = 64;
12786                 mode_config->cursor_height = 64;
12787         } else {
12788                 mode_config->cursor_width = 256;
12789                 mode_config->cursor_height = 256;
12790         }
12791 }
12792
/*
 * Tear down what intel_mode_config_init() set up; the i915 global
 * object state is released before the drm core mode config.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
12798
12799 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12800 {
12801         if (plane_config->fb) {
12802                 struct drm_framebuffer *fb = &plane_config->fb->base;
12803
12804                 /* We may only have the stub and not a full framebuffer */
12805                 if (drm_framebuffer_read_refcount(fb))
12806                         drm_framebuffer_put(fb);
12807                 else
12808                         kfree(fb);
12809         }
12810
12811         if (plane_config->vma)
12812                 i915_vma_put(plane_config->vma);
12813 }
12814
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	/*
	 * NOTE(review): allocation results are not checked here, and the
	 * workqueues are not destroyed on the error paths below — confirm
	 * teardown happens in the driver remove path instead.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
12880
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create one crtc per pipe; unwind everything if any pipe fails. */
	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out the hw state the BIOS/GOP left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
12972
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
13005
/*
 * Force-enable @pipe with a fixed 640x480@60 mode (i830 "force pipe"
 * quirk path).  The register write order below is deliberate.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check that the fixed dividers produce the expected dotclock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Program the fixed 640x480 pipe timings. */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	/* Finally turn the pipe on and wait for it to actually start moving. */
	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
13078
/*
 * Disable a pipe that was force-enabled by the i830 quirk: turn off
 * the pipe, wait for the scanline to stop, then disable the DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes and cursors are expected to be off already. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Shut the DPLL down only after the pipe has fully stopped. */
	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
13108
13109 static void
13110 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
13111 {
13112         struct intel_crtc *crtc;
13113
13114         if (INTEL_GEN(dev_priv) >= 4)
13115                 return;
13116
13117         for_each_intel_crtc(&dev_priv->drm, crtc) {
13118                 struct intel_plane *plane =
13119                         to_intel_plane(crtc->base.primary);
13120                 struct intel_crtc *plane_crtc;
13121                 enum pipe pipe;
13122
13123                 if (!plane->get_hw_state(plane, &pipe))
13124                         continue;
13125
13126                 if (pipe == crtc->pipe)
13127                         continue;
13128
13129                 drm_dbg_kms(&dev_priv->drm,
13130                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
13131                             plane->base.base.id, plane->base.name);
13132
13133                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13134                 intel_plane_disable_noatomic(plane_crtc, plane);
13135         }
13136 }
13137
13138 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
13139 {
13140         struct drm_device *dev = crtc->base.dev;
13141         struct intel_encoder *encoder;
13142
13143         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
13144                 return true;
13145
13146         return false;
13147 }
13148
13149 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
13150 {
13151         struct drm_device *dev = encoder->base.dev;
13152         struct intel_connector *connector;
13153
13154         for_each_connector_on_encoder(dev, &encoder->base, connector)
13155                 return connector;
13156
13157         return NULL;
13158 }
13159
13160 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
13161                               enum pipe pch_transcoder)
13162 {
13163         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
13164                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
13165 }
13166
/*
 * Program the frame start delay (from i915->framestart_delay) into the
 * CPU transcoder/pipe registers, and the PCH transcoder ones when a PCH
 * encoder is in use, overriding whatever the BIOS left there.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+ keep the delay in a per-transcoder chicken register. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms keep it in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT and later use the TRANS_CHICKEN2 register instead. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
13217
/*
 * Bring a crtc's hardware state left behind by the BIOS into a shape
 * the driver can handle: fix frame start delays, disable non-primary
 * planes, disable the pipe entirely if nothing drives it, and set up
 * FIFO underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
13284
13285 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
13286 {
13287         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
13288
13289         /*
13290          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
13291          * the hardware when a high res displays plugged in. DPLL P
13292          * divider is zero, and the pipe timings are bonkers. We'll
13293          * try to disable everything in that case.
13294          *
13295          * FIXME would be nice to be able to sanitize this state
13296          * without several WARNs, but for now let's take the easy
13297          * road.
13298          */
13299         return IS_GEN(dev_priv, 6) &&
13300                 crtc_state->hw.active &&
13301                 crtc_state->shared_dpll &&
13302                 crtc_state->port_clock == 0;
13303 }
13304
/*
 * Sanitize a single encoder: if a connector claims the encoder is in
 * use but there is no active pipe behind it, manually run the encoder's
 * disable hooks and clamp the connector/encoder software state to off,
 * so software and hardware state agree again.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/*
	 * Treat a BIOS-misprogrammed DPLL (see has_bogus_dpll_config())
	 * as an inactive pipe so the encoder gets force-disabled below.
	 */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	/* gen11+ additionally sanitizes the encoder's DPLL mapping */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
13375
13376 /* FIXME read out full plane state for all planes */
13377 static void readout_plane_state(struct drm_i915_private *dev_priv)
13378 {
13379         struct intel_plane *plane;
13380         struct intel_crtc *crtc;
13381
13382         for_each_intel_plane(&dev_priv->drm, plane) {
13383                 struct intel_plane_state *plane_state =
13384                         to_intel_plane_state(plane->base.state);
13385                 struct intel_crtc_state *crtc_state;
13386                 enum pipe pipe = PIPE_A;
13387                 bool visible;
13388
13389                 visible = plane->get_hw_state(plane, &pipe);
13390
13391                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13392                 crtc_state = to_intel_crtc_state(crtc->base.state);
13393
13394                 intel_set_plane_visible(crtc_state, plane_state, visible);
13395
13396                 drm_dbg_kms(&dev_priv->drm,
13397                             "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
13398                             plane->base.base.id, plane->base.name,
13399                             enableddisabled(visible), pipe_name(pipe));
13400         }
13401
13402         for_each_intel_crtc(&dev_priv->drm, crtc) {
13403                 struct intel_crtc_state *crtc_state =
13404                         to_intel_crtc_state(crtc->base.state);
13405
13406                 fixup_plane_bitmasks(crtc_state);
13407         }
13408 }
13409
13410 static void intel_modeset_readout_hw_state(struct drm_device *dev)
13411 {
13412         struct drm_i915_private *dev_priv = to_i915(dev);
13413         struct intel_cdclk_state *cdclk_state =
13414                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
13415         struct intel_dbuf_state *dbuf_state =
13416                 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
13417         enum pipe pipe;
13418         struct intel_crtc *crtc;
13419         struct intel_encoder *encoder;
13420         struct intel_connector *connector;
13421         struct drm_connector_list_iter conn_iter;
13422         u8 active_pipes = 0;
13423
13424         for_each_intel_crtc(dev, crtc) {
13425                 struct intel_crtc_state *crtc_state =
13426                         to_intel_crtc_state(crtc->base.state);
13427
13428                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
13429                 intel_crtc_free_hw_state(crtc_state);
13430                 intel_crtc_state_reset(crtc_state, crtc);
13431
13432                 intel_crtc_get_pipe_config(crtc_state);
13433
13434                 crtc_state->hw.enable = crtc_state->hw.active;
13435
13436                 crtc->base.enabled = crtc_state->hw.enable;
13437                 crtc->active = crtc_state->hw.active;
13438
13439                 if (crtc_state->hw.active)
13440                         active_pipes |= BIT(crtc->pipe);
13441
13442                 drm_dbg_kms(&dev_priv->drm,
13443                             "[CRTC:%d:%s] hw state readout: %s\n",
13444                             crtc->base.base.id, crtc->base.name,
13445                             enableddisabled(crtc_state->hw.active));
13446         }
13447
13448         dev_priv->active_pipes = cdclk_state->active_pipes =
13449                 dbuf_state->active_pipes = active_pipes;
13450
13451         readout_plane_state(dev_priv);
13452
13453         intel_dpll_readout_hw_state(dev_priv);
13454
13455         for_each_intel_encoder(dev, encoder) {
13456                 pipe = 0;
13457
13458                 if (encoder->get_hw_state(encoder, &pipe)) {
13459                         struct intel_crtc_state *crtc_state;
13460
13461                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13462                         crtc_state = to_intel_crtc_state(crtc->base.state);
13463
13464                         encoder->base.crtc = &crtc->base;
13465                         intel_encoder_get_config(encoder, crtc_state);
13466                         if (encoder->sync_state)
13467                                 encoder->sync_state(encoder, crtc_state);
13468
13469                         /* read out to slave crtc as well for bigjoiner */
13470                         if (crtc_state->bigjoiner) {
13471                                 /* encoder should read be linked to bigjoiner master */
13472                                 WARN_ON(crtc_state->bigjoiner_slave);
13473
13474                                 crtc = crtc_state->bigjoiner_linked_crtc;
13475                                 crtc_state = to_intel_crtc_state(crtc->base.state);
13476                                 intel_encoder_get_config(encoder, crtc_state);
13477                         }
13478                 } else {
13479                         encoder->base.crtc = NULL;
13480                 }
13481
13482                 drm_dbg_kms(&dev_priv->drm,
13483                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
13484                             encoder->base.base.id, encoder->base.name,
13485                             enableddisabled(encoder->base.crtc),
13486                             pipe_name(pipe));
13487         }
13488
13489         drm_connector_list_iter_begin(dev, &conn_iter);
13490         for_each_intel_connector_iter(connector, &conn_iter) {
13491                 if (connector->get_hw_state(connector)) {
13492                         struct intel_crtc_state *crtc_state;
13493                         struct intel_crtc *crtc;
13494
13495                         connector->base.dpms = DRM_MODE_DPMS_ON;
13496
13497                         encoder = intel_attached_encoder(connector);
13498                         connector->base.encoder = &encoder->base;
13499
13500                         crtc = to_intel_crtc(encoder->base.crtc);
13501                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
13502
13503                         if (crtc_state && crtc_state->hw.active) {
13504                                 /*
13505                                  * This has to be done during hardware readout
13506                                  * because anything calling .crtc_disable may
13507                                  * rely on the connector_mask being accurate.
13508                                  */
13509                                 crtc_state->uapi.connector_mask |=
13510                                         drm_connector_mask(&connector->base);
13511                                 crtc_state->uapi.encoder_mask |=
13512                                         drm_encoder_mask(&encoder->base);
13513                         }
13514                 } else {
13515                         connector->base.dpms = DRM_MODE_DPMS_OFF;
13516                         connector->base.encoder = NULL;
13517                 }
13518                 drm_dbg_kms(&dev_priv->drm,
13519                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
13520                             connector->base.base.id, connector->base.name,
13521                             enableddisabled(connector->base.encoder));
13522         }
13523         drm_connector_list_iter_end(&conn_iter);
13524
13525         for_each_intel_crtc(dev, crtc) {
13526                 struct intel_bw_state *bw_state =
13527                         to_intel_bw_state(dev_priv->bw_obj.state);
13528                 struct intel_crtc_state *crtc_state =
13529                         to_intel_crtc_state(crtc->base.state);
13530                 struct intel_plane *plane;
13531                 int min_cdclk = 0;
13532
13533                 if (crtc_state->bigjoiner_slave)
13534                         continue;
13535
13536                 if (crtc_state->hw.active) {
13537                         /*
13538                          * The initial mode needs to be set in order to keep
13539                          * the atomic core happy. It wants a valid mode if the
13540                          * crtc's enabled, so we do the above call.
13541                          *
13542                          * But we don't set all the derived state fully, hence
13543                          * set a flag to indicate that a full recalculation is
13544                          * needed on the next commit.
13545                          */
13546                         crtc_state->inherited = true;
13547
13548                         intel_crtc_update_active_timings(crtc_state);
13549
13550                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
13551                 }
13552
13553                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13554                         const struct intel_plane_state *plane_state =
13555                                 to_intel_plane_state(plane->base.state);
13556
13557                         /*
13558                          * FIXME don't have the fb yet, so can't
13559                          * use intel_plane_data_rate() :(
13560                          */
13561                         if (plane_state->uapi.visible)
13562                                 crtc_state->data_rate[plane->id] =
13563                                         4 * crtc_state->pixel_rate;
13564                         /*
13565                          * FIXME don't have the fb yet, so can't
13566                          * use plane->min_cdclk() :(
13567                          */
13568                         if (plane_state->uapi.visible && plane->min_cdclk) {
13569                                 if (crtc_state->double_wide ||
13570                                     INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
13571                                         crtc_state->min_cdclk[plane->id] =
13572                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
13573                                 else
13574                                         crtc_state->min_cdclk[plane->id] =
13575                                                 crtc_state->pixel_rate;
13576                         }
13577                         drm_dbg_kms(&dev_priv->drm,
13578                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
13579                                     plane->base.base.id, plane->base.name,
13580                                     crtc_state->min_cdclk[plane->id]);
13581                 }
13582
13583                 if (crtc_state->hw.active) {
13584                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
13585                         if (drm_WARN_ON(dev, min_cdclk < 0))
13586                                 min_cdclk = 0;
13587                 }
13588
13589                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13590                 cdclk_state->min_voltage_level[crtc->pipe] =
13591                         crtc_state->min_voltage_level;
13592
13593                 intel_bw_crtc_update(bw_state, crtc_state);
13594
13595                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
13596
13597                 /* discard our incomplete slave state, copy it from master */
13598                 if (crtc_state->bigjoiner && crtc_state->hw.active) {
13599                         struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13600                         struct intel_crtc_state *slave_crtc_state =
13601                                 to_intel_crtc_state(slave->base.state);
13602
13603                         copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13604                         slave->base.mode = crtc->base.mode;
13605
13606                         cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13607                         cdclk_state->min_voltage_level[slave->pipe] =
13608                                 crtc_state->min_voltage_level;
13609
13610                         for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13611                                 const struct intel_plane_state *plane_state =
13612                                         to_intel_plane_state(plane->base.state);
13613
13614                                 /*
13615                                  * FIXME don't have the fb yet, so can't
13616                                  * use intel_plane_data_rate() :(
13617                                  */
13618                                 if (plane_state->uapi.visible)
13619                                         crtc_state->data_rate[plane->id] =
13620                                                 4 * crtc_state->pixel_rate;
13621                                 else
13622                                         crtc_state->data_rate[plane->id] = 0;
13623                         }
13624
13625                         intel_bw_crtc_update(bw_state, slave_crtc_state);
13626                         drm_calc_timestamping_constants(&slave->base,
13627                                                         &slave_crtc_state->hw.adjusted_mode);
13628                 }
13629         }
13630 }
13631
13632 static void
13633 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13634 {
13635         struct intel_encoder *encoder;
13636
13637         for_each_intel_encoder(&dev_priv->drm, encoder) {
13638                 struct intel_crtc_state *crtc_state;
13639
13640                 if (!encoder->get_power_domains)
13641                         continue;
13642
13643                 /*
13644                  * MST-primary and inactive encoders don't have a crtc state
13645                  * and neither of these require any power domain references.
13646                  */
13647                 if (!encoder->base.crtc)
13648                         continue;
13649
13650                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13651                 encoder->get_power_domains(encoder, crtc_state);
13652         }
13653 }
13654
/*
 * Apply display workarounds that must be in place early, before the
 * rest of the display hardware state takeover touches the planes/pipes.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
13683
13684 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13685                                        enum port port, i915_reg_t hdmi_reg)
13686 {
13687         u32 val = intel_de_read(dev_priv, hdmi_reg);
13688
13689         if (val & SDVO_ENABLE ||
13690             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13691                 return;
13692
13693         drm_dbg_kms(&dev_priv->drm,
13694                     "Sanitizing transcoder select for HDMI %c\n",
13695                     port_name(port));
13696
13697         val &= ~SDVO_PIPE_SEL_MASK;
13698         val |= SDVO_PIPE_SEL(PIPE_A);
13699
13700         intel_de_write(dev_priv, hdmi_reg, val);
13701 }
13702
13703 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13704                                      enum port port, i915_reg_t dp_reg)
13705 {
13706         u32 val = intel_de_read(dev_priv, dp_reg);
13707
13708         if (val & DP_PORT_EN ||
13709             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13710                 return;
13711
13712         drm_dbg_kms(&dev_priv->drm,
13713                     "Sanitizing transcoder select for DP %c\n",
13714                     port_name(port));
13715
13716         val &= ~DP_PIPE_SEL_MASK;
13717         val |= DP_PIPE_SEL(PIPE_A);
13718
13719         intel_de_write(dev_priv, dp_reg, val);
13720 }
13721
13722 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13723 {
13724         /*
13725          * The BIOS may select transcoder B on some of the PCH
13726          * ports even it doesn't enable the port. This would trip
13727          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13728          * Sanitize the transcoder select bits to prevent that. We
13729          * assume that the BIOS never actually enabled the port,
13730          * because if it did we'd actually have to toggle the port
13731          * on and back off to make the transcoder A select stick
13732          * (see. intel_dp_link_down(), intel_disable_hdmi(),
13733          * intel_disable_sdvo()).
13734          */
13735         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13736         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13737         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13738
13739         /* PCH SDVOB multiplex with HDMIB */
13740         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13741         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13742         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13743 }
13744
/*
 * Scan out the current hw modeset state, and sanitize it to the
 * current state (clamping anything inconsistent to off). The
 * ordering of the sanitization steps below is significant.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep the hardware powered while we poke at it. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and on g4x/vlv/chv sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * NOTE(review): presumably all crtc power domains were already
	 * acquired during readout, hence the WARN if any remain to be
	 * taken here — confirm against modeset_get_crtc_power_domains().
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
13834
13835 void intel_display_resume(struct drm_device *dev)
13836 {
13837         struct drm_i915_private *dev_priv = to_i915(dev);
13838         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13839         struct drm_modeset_acquire_ctx ctx;
13840         int ret;
13841
13842         dev_priv->modeset_restore_state = NULL;
13843         if (state)
13844                 state->acquire_ctx = &ctx;
13845
13846         drm_modeset_acquire_init(&ctx, 0);
13847
13848         while (1) {
13849                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
13850                 if (ret != -EDEADLK)
13851                         break;
13852
13853                 drm_modeset_backoff(&ctx);
13854         }
13855
13856         if (!ret)
13857                 ret = __intel_display_resume(dev, state, &ctx);
13858
13859         intel_enable_ipc(dev_priv);
13860         drm_modeset_drop_locks(&ctx);
13861         drm_modeset_acquire_fini(&ctx);
13862
13863         if (ret)
13864                 drm_err(&dev_priv->drm,
13865                         "Restoring old state failed with %i\n", ret);
13866         if (state)
13867                 drm_atomic_state_put(state);
13868 }
13869
13870 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13871 {
13872         struct intel_connector *connector;
13873         struct drm_connector_list_iter conn_iter;
13874
13875         /* Kill all the work that may have been queued by hpd. */
13876         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13877         for_each_intel_connector_iter(connector, &conn_iter) {
13878                 if (connector->modeset_retry_work.func)
13879                         cancel_work_sync(&connector->modeset_retry_work);
13880                 if (connector->hdcp.shim) {
13881                         cancel_delayed_work_sync(&connector->hdcp.check_work);
13882                         cancel_work_sync(&connector->hdcp.prop_work);
13883                 }
13884         }
13885         drm_connector_list_iter_end(&conn_iter);
13886 }
13887
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain the dedicated flip and modeset workqueues. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/*
	 * After flushing the atomic helper's cleanup work, no freed
	 * state may remain queued on the free list.
	 */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
13897
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* no more work can be queued on these from here on */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
13937
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	/* release the CSR/DMC firmware */
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	/* free the VBT data acquired from the BIOS */
	intel_bios_driver_remove(i915);
}
13949
13950 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
13951
/*
 * Snapshot of display controller register state, captured by
 * intel_display_capture_error_state() when a GPU error occurs.
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2 contents (captured on HSW/BDW only) */
	u32 power_well_driver;

	/* per-pipe cursor plane registers */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false if the pipe's power domain was off at capture time */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* per-pipe primary plane registers */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* transcoders A-D plus EDP, see intel_display_capture_error_state() */
	struct intel_transcoder_error_state {
		/* NOTE(review): presumably false for transcoders that don't
		 * exist on the platform — set outside the visible code */
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		/* transcoder timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
13994
13995 struct intel_display_error_state *
13996 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
13997 {
13998         struct intel_display_error_state *error;
13999         int transcoders[] = {
14000                 TRANSCODER_A,
14001                 TRANSCODER_B,
14002                 TRANSCODER_C,
14003                 TRANSCODER_D,
14004                 TRANSCODER_EDP,
14005         };
14006         int i;
14007
14008         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
14009
14010         if (!HAS_DISPLAY(dev_priv))
14011                 return NULL;
14012
14013         error = kzalloc(sizeof(*error), GFP_ATOMIC);
14014         if (error == NULL)
14015                 return NULL;
14016
14017         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
14018                 error->power_well_driver = intel_de_read(dev_priv,
14019                                                          HSW_PWR_WELL_CTL2);
14020
14021         for_each_pipe(dev_priv, i) {
14022                 error->pipe[i].power_domain_on =
14023                         __intel_display_power_is_enabled(dev_priv,
14024                                                          POWER_DOMAIN_PIPE(i));
14025                 if (!error->pipe[i].power_domain_on)
14026                         continue;
14027
14028                 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
14029                 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
14030                 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
14031
14032                 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
14033                 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
14034                 if (INTEL_GEN(dev_priv) <= 3) {
14035                         error->plane[i].size = intel_de_read(dev_priv,
14036                                                              DSPSIZE(i));
14037                         error->plane[i].pos = intel_de_read(dev_priv,
14038                                                             DSPPOS(i));
14039                 }
14040                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
14041                         error->plane[i].addr = intel_de_read(dev_priv,
14042                                                              DSPADDR(i));
14043                 if (INTEL_GEN(dev_priv) >= 4) {
14044                         error->plane[i].surface = intel_de_read(dev_priv,
14045                                                                 DSPSURF(i));
14046                         error->plane[i].tile_offset = intel_de_read(dev_priv,
14047                                                                     DSPTILEOFF(i));
14048                 }
14049
14050                 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
14051
14052                 if (HAS_GMCH(dev_priv))
14053                         error->pipe[i].stat = intel_de_read(dev_priv,
14054                                                             PIPESTAT(i));
14055         }
14056
14057         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
14058                 enum transcoder cpu_transcoder = transcoders[i];
14059
14060                 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
14061                         continue;
14062
14063                 error->transcoder[i].available = true;
14064                 error->transcoder[i].power_domain_on =
14065                         __intel_display_power_is_enabled(dev_priv,
14066                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
14067                 if (!error->transcoder[i].power_domain_on)
14068                         continue;
14069
14070                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
14071
14072                 error->transcoder[i].conf = intel_de_read(dev_priv,
14073                                                           PIPECONF(cpu_transcoder));
14074                 error->transcoder[i].htotal = intel_de_read(dev_priv,
14075                                                             HTOTAL(cpu_transcoder));
14076                 error->transcoder[i].hblank = intel_de_read(dev_priv,
14077                                                             HBLANK(cpu_transcoder));
14078                 error->transcoder[i].hsync = intel_de_read(dev_priv,
14079                                                            HSYNC(cpu_transcoder));
14080                 error->transcoder[i].vtotal = intel_de_read(dev_priv,
14081                                                             VTOTAL(cpu_transcoder));
14082                 error->transcoder[i].vblank = intel_de_read(dev_priv,
14083                                                             VBLANK(cpu_transcoder));
14084                 error->transcoder[i].vsync = intel_de_read(dev_priv,
14085                                                            VSYNC(cpu_transcoder));
14086         }
14087
14088         return error;
14089 }
14090
14091 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
14092
14093 void
14094 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
14095                                 struct intel_display_error_state *error)
14096 {
14097         struct drm_i915_private *dev_priv = m->i915;
14098         int i;
14099
14100         if (!error)
14101                 return;
14102
14103         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
14104         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
14105                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
14106                            error->power_well_driver);
14107         for_each_pipe(dev_priv, i) {
14108                 err_printf(m, "Pipe [%d]:\n", i);
14109                 err_printf(m, "  Power: %s\n",
14110                            onoff(error->pipe[i].power_domain_on));
14111                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
14112                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
14113
14114                 err_printf(m, "Plane [%d]:\n", i);
14115                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
14116                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
14117                 if (INTEL_GEN(dev_priv) <= 3) {
14118                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
14119                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
14120                 }
14121                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
14122                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
14123                 if (INTEL_GEN(dev_priv) >= 4) {
14124                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
14125                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
14126                 }
14127
14128                 err_printf(m, "Cursor [%d]:\n", i);
14129                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
14130                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
14131                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
14132         }
14133
14134         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
14135                 if (!error->transcoder[i].available)
14136                         continue;
14137
14138                 err_printf(m, "CPU transcoder: %s\n",
14139                            transcoder_name(error->transcoder[i].cpu_transcoder));
14140                 err_printf(m, "  Power: %s\n",
14141                            onoff(error->transcoder[i].power_domain_on));
14142                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
14143                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
14144                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
14145                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
14146                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
14147                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
14148                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
14149         }
14150 }
14151
14152 #endif