drm/i915/display: Convert gen5/gen6 tests to IS_IRONLAKE/IS_SANDYBRIDGE
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_gmbus.h"
58 #include "display/intel_hdmi.h"
59 #include "display/intel_lvds.h"
60 #include "display/intel_sdvo.h"
61 #include "display/intel_tv.h"
62 #include "display/intel_vdsc.h"
63 #include "display/intel_vrr.h"
64
65 #include "gem/i915_gem_object.h"
66
67 #include "gt/intel_rps.h"
68
69 #include "g4x_dp.h"
70 #include "g4x_hdmi.h"
71 #include "i915_drv.h"
72 #include "intel_acpi.h"
73 #include "intel_atomic.h"
74 #include "intel_atomic_plane.h"
75 #include "intel_bw.h"
76 #include "intel_cdclk.h"
77 #include "intel_color.h"
78 #include "intel_crtc.h"
79 #include "intel_csr.h"
80 #include "intel_display_types.h"
81 #include "intel_dp_link_training.h"
82 #include "intel_fbc.h"
83 #include "intel_fdi.h"
84 #include "intel_fbdev.h"
85 #include "intel_fifo_underrun.h"
86 #include "intel_frontbuffer.h"
87 #include "intel_hdcp.h"
88 #include "intel_hotplug.h"
89 #include "intel_overlay.h"
90 #include "intel_pipe_crc.h"
91 #include "intel_pm.h"
92 #include "intel_pps.h"
93 #include "intel_psr.h"
94 #include "intel_quirks.h"
95 #include "intel_sideband.h"
96 #include "intel_sprite.h"
97 #include "intel_tc.h"
98 #include "intel_vga.h"
99 #include "i9xx_plane.h"
100 #include "skl_scaler.h"
101 #include "skl_universal_plane.h"
102
103 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
104                                 struct intel_crtc_state *pipe_config);
105 static void ilk_pch_clock_get(struct intel_crtc *crtc,
106                               struct intel_crtc_state *pipe_config);
107
108 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
109                                   struct drm_i915_gem_object *obj,
110                                   struct drm_mode_fb_cmd2 *mode_cmd);
111 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
112 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
113 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
114                                          const struct intel_link_m_n *m_n,
115                                          const struct intel_link_m_n *m2_n2);
116 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
117 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
118 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
119 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
120 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
121 static void intel_modeset_setup_hw_state(struct drm_device *dev,
122                                          struct drm_modeset_acquire_ctx *ctx);
123
124 /* returns HPLL frequency in kHz */
125 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
126 {
127         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
128
129         /* Obtain SKU information */
130         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
131                 CCK_FUSE_HPLL_FREQ_MASK;
132
133         return vco_freq[hpll_freq] * 1000;
134 }
135
136 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
137                       const char *name, u32 reg, int ref_freq)
138 {
139         u32 val;
140         int divider;
141
142         val = vlv_cck_read(dev_priv, reg);
143         divider = val & CCK_FREQUENCY_VALUES;
144
145         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
146                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
147                  "%s change in progress\n", name);
148
149         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
150 }
151
152 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
153                            const char *name, u32 reg)
154 {
155         int hpll;
156
157         vlv_cck_get(dev_priv);
158
159         if (dev_priv->hpll_freq == 0)
160                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
161
162         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
163
164         vlv_cck_put(dev_priv);
165
166         return hpll;
167 }
168
169 static void intel_update_czclk(struct drm_i915_private *dev_priv)
170 {
171         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
172                 return;
173
174         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
175                                                       CCK_CZ_CLOCK_CONTROL);
176
177         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
178                 dev_priv->czclk_freq);
179 }
180
181 /* WA Display #0827: Gen9:all */
182 static void
183 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
184 {
185         if (enable)
186                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
187                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
188         else
189                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
190                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
191 }
192
193 /* Wa_2006604312:icl,ehl */
194 static void
195 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
196                        bool enable)
197 {
198         if (enable)
199                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
200                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
201         else
202                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
203                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
204 }
205
/*
 * A transcoder acts as a port sync slave when it has a valid master
 * transcoder recorded in its state.
 */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
211
/*
 * A transcoder acts as a port sync master when at least one slave
 * transcoder is recorded in its sync mode slaves mask.
 */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
217
/*
 * True when the crtc participates in transcoder port sync at all,
 * whether as master or as slave.
 */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
224
225 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
226                                     enum pipe pipe)
227 {
228         i915_reg_t reg = PIPEDSL(pipe);
229         u32 line1, line2;
230         u32 line_mask;
231
232         if (IS_GEN(dev_priv, 2))
233                 line_mask = DSL_LINEMASK_GEN2;
234         else
235                 line_mask = DSL_LINEMASK_GEN3;
236
237         line1 = intel_de_read(dev_priv, reg) & line_mask;
238         msleep(5);
239         line2 = intel_de_read(dev_priv, reg) & line_mask;
240
241         return line1 != line2;
242 }
243
/*
 * Poll until the pipe's scanline counter reaches the desired
 * moving/stopped @state, logging an error on a 100ms timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
255
/* Wait for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
260
/* Wait for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
265
266 static void
267 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
268 {
269         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
270         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
271
272         if (INTEL_GEN(dev_priv) >= 4) {
273                 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
274                 i915_reg_t reg = PIPECONF(cpu_transcoder);
275
276                 /* Wait for the Pipe State to go off */
277                 if (intel_de_wait_for_clear(dev_priv, reg,
278                                             I965_PIPECONF_ACTIVE, 100))
279                         drm_WARN(&dev_priv->drm, 1,
280                                  "pipe_off wait timed out\n");
281         } else {
282                 intel_wait_for_pipe_scanline_stopped(crtc);
283         }
284 }
285
/*
 * Only for pre-ILK configs: assert that the per-pipe DPLL is in the
 * expected on/off @state, based on the DPLL VCO enable bit.
 */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
299
/*
 * Assert the DSI PLL on/off state via the CCK sideband.
 * XXX: the dsi pll is shared between MIPI DSI ports.
 */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* PLL control lives behind the CCK sideband, hence the lock */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
315
/*
 * Assert the FDI TX on/off state for @pipe.  On DDI platforms the
 * transcoder DDI function enable bit stands in for the (nonexistent)
 * FDI_TX register.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
342
343 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
344                           enum pipe pipe, bool state)
345 {
346         u32 val;
347         bool cur_state;
348
349         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
350         cur_state = !!(val & FDI_RX_ENABLE);
351         I915_STATE_WARN(cur_state != state,
352              "FDI RX state assertion failure (expected %s, current %s)\n",
353                         onoff(state), onoff(cur_state));
354 }
355 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
356 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
357
358 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
359                                       enum pipe pipe)
360 {
361         u32 val;
362
363         /* ILK FDI PLL is always enabled */
364         if (IS_IRONLAKE(dev_priv))
365                 return;
366
367         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
368         if (HAS_DDI(dev_priv))
369                 return;
370
371         val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
372         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
373 }
374
/* Assert the FDI RX PLL on/off state for @pipe via FDI_RX_CTL. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
387
/*
 * Assert that the panel power sequencer registers driving @pipe are not
 * write-locked (or that the panel is off).  The PPS register location and
 * the pipe the panel is attached to are looked up per platform; DDI
 * platforms are rejected outright since they are handled elsewhere.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Single PPS instance; port select tells us which output */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH-split: only LVDS can be the PPS client */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Panel off, or unlock bits set => registers are writable */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
445
/*
 * Assert the on/off state of @cpu_transcoder's pipe.  The PIPECONF read
 * is guarded by a power-domain reference; if the domain is powered down
 * the pipe is treated as disabled without touching the register.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power domain off implies the pipe cannot be enabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
473
474 static void assert_plane(struct intel_plane *plane, bool state)
475 {
476         enum pipe pipe;
477         bool cur_state;
478
479         cur_state = plane->get_hw_state(plane, &pipe);
480
481         I915_STATE_WARN(cur_state != state,
482                         "%s assertion failure (expected %s, current %s)\n",
483                         plane->base.name, onoff(state), onoff(cur_state));
484 }
485
486 #define assert_plane_enabled(p) assert_plane(p, true)
487 #define assert_plane_disabled(p) assert_plane(p, false)
488
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
497
/* Assert that the PCH transcoder for @pipe is disabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
510
/*
 * Assert that PCH DP @port is not driving @pipe, and additionally that
 * a disabled port on IBX is not left parked on transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
528
/*
 * Assert that PCH HDMI/SDVO @port is not driving @pipe, and additionally
 * that a disabled port on IBX is not left parked on transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
546
/*
 * Assert that no PCH port (DP, VGA, LVDS, HDMI/SDVO) is still driving
 * @pipe; called before disabling the PCH transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
571
/*
 * Wait (up to 1s) for the VLV/CHV DPIO PHY to report the given port as
 * ready, matching @expected_mask against the per-port ready field.
 * Logs a WARN with the observed vs expected bits on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Pick the register and bitfield that carries this port's status */
	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
605
/*
 * Enable the PCH transcoder feeding this crtc's pipe (ILK-IVB).
 * Preconditions (asserted): the shared DPLL and both FDI directions are
 * already enabled.  Applies the CPT timing-override workaround, copies
 * frame start delay and (on IBX) BPC from the CPU pipe, selects the
 * interlace mode, then enables the transcoder and waits for it to
 * report enabled.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the CPU pipe's interlace mode on the PCH transcoder */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* IBX SDVO needs the legacy interlaced mode */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
672
/*
 * Enable the single LPT PCH transcoder (HSW/BDW with LPT PCH).  FDI must
 * already be up; applies the timing-override workaround and frame start
 * delay on the PIPE_A chicken register (LPT has only one transcoder),
 * selects interlace mode from the CPU transcoder, then enables
 * LPT_TRANSCONF and waits for it to report enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	/* Mirror the CPU transcoder's interlace mode */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
704
/*
 * Disable the PCH transcoder for @pipe (ILK-IVB).  FDI and all PCH
 * ports must already be off (asserted).  Clears TRANS_ENABLE, waits for
 * the transcoder to report disabled, then undoes the CPT timing
 * override workaround.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
735
/*
 * Disable the single LPT PCH transcoder: clear TRANS_ENABLE, wait for
 * the disabled state, then clear the timing-override workaround bit.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
753
754 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
755 {
756         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
757
758         if (HAS_PCH_LPT(dev_priv))
759                 return PIPE_A;
760         else
761                 return crtc->pipe;
762 }
763
/*
 * Enable the pipe for the new crtc state.  All planes must already be
 * disabled (asserted), and the relevant PLL(s) enabled.  Sets
 * PIPECONF_ENABLE and, when no hardware frame counter is usable, waits
 * for the scanline to start moving so vblank timestamps are sane.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
819
/*
 * Disable the pipe for @old_crtc_state.
 *
 * Clears PIPECONF_ENABLE (except on i830, where both pipes are kept
 * running) and waits for the pipe to actually turn off.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Already disabled - nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	/* Only wait for pipe-off if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
857
/* Tile size in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
862
863 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
864 {
865         if (is_ccs_modifier(fb->modifier))
866                 return is_ccs_plane(fb, plane);
867
868         return plane == 1;
869 }
870
871 bool
872 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
873                                     u64 modifier)
874 {
875         return info->is_yuv &&
876                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
877 }
878
879 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
880                                    int color_plane)
881 {
882         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
883                color_plane == 1;
884 }
885
/*
 * Return the tile width in bytes for @color_plane of @fb.
 *
 * Linear buffers report the full tile size (see intel_tile_size()).
 * CCS planes use narrower tiles than the corresponding main surface,
 * hence the per-plane checks ahead of each fallthrough.
 */
unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
939
940 unsigned int
941 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
942 {
943         if (is_gen12_ccs_plane(fb, color_plane))
944                 return 1;
945
946         return intel_tile_size(to_i915(fb->dev)) /
947                 intel_tile_width_bytes(fb, color_plane);
948 }
949
950 /* Return the tile dimensions in pixel units */
951 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
952                             unsigned int *tile_width,
953                             unsigned int *tile_height)
954 {
955         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
956         unsigned int cpp = fb->format->cpp[color_plane];
957
958         *tile_width = tile_width_bytes / cpp;
959         *tile_height = intel_tile_height(fb, color_plane);
960 }
961
962 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
963                                         int color_plane)
964 {
965         unsigned int tile_width, tile_height;
966
967         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
968
969         return fb->pitches[color_plane] * tile_height;
970 }
971
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
980
981 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
982 {
983         unsigned int size = 0;
984         int i;
985
986         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
987                 size += rot_info->plane[i].width * rot_info->plane[i].height;
988
989         return size;
990 }
991
992 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
993 {
994         unsigned int size = 0;
995         int i;
996
997         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
998                 size += rem_info->plane[i].width * rem_info->plane[i].height;
999
1000         return size;
1001 }
1002
1003 static void
1004 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1005                         const struct drm_framebuffer *fb,
1006                         unsigned int rotation)
1007 {
1008         view->type = I915_GGTT_VIEW_NORMAL;
1009         if (drm_rotation_90_or_270(rotation)) {
1010                 view->type = I915_GGTT_VIEW_ROTATED;
1011                 view->rotated = to_intel_framebuffer(fb)->rot_info;
1012         }
1013 }
1014
/* Required GTT alignment for the cursor plane on this platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
1026
/* Required GTT alignment for linear scanout surfaces. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
1039
/* True on gen5 (ILK) and newer, where async flips are available. */
static bool has_async_flips(struct drm_i915_private *i915)
{
	return INTEL_GEN(i915) >= 5;
}
1044
/*
 * Return the required GTT alignment in bytes for @color_plane of @fb.
 * A return value of 0 means no particular alignment is required.
 */
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* Semiplanar UV planes are aligned to a full tile row. */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
1083
1084 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1085 {
1086         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1087         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1088
1089         return INTEL_GEN(dev_priv) < 4 ||
1090                 (plane->has_fbc &&
1091                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
1092 }
1093
/*
 * Pin a framebuffer's backing object into the GGTT for scanout, and
 * optionally install a fence register for it.
 *
 * On success returns the vma with an extra reference held (dropped by
 * intel_unpin_fb_vma()) and may set PLANE_HAS_FENCE in @out_flags.
 * Returns an ERR_PTR() on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Alignment is derived from the main (color plane 0) surface. */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* A fence is mandatory pre-gen4; give up. */
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	/* Both the success and error paths drop the pin count and wakeref. */
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
1188
/*
 * Release a scanout pin obtained from intel_pin_and_fence_fb_obj():
 * drop the fence (if one was installed), the pin, and our vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
1196
1197 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
1198                           unsigned int rotation)
1199 {
1200         if (drm_rotation_90_or_270(rotation))
1201                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
1202         else
1203                 return fb->pitches[color_plane];
1204 }
1205
1206 /*
1207  * Convert the x/y offsets into a linear offset.
1208  * Only valid with 0/180 degree rotation, which is fine since linear
1209  * offset is only used with linear buffers on pre-hsw and tiled buffers
1210  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1211  */
1212 u32 intel_fb_xy_to_linear(int x, int y,
1213                           const struct intel_plane_state *state,
1214                           int color_plane)
1215 {
1216         const struct drm_framebuffer *fb = state->hw.fb;
1217         unsigned int cpp = fb->format->cpp[color_plane];
1218         unsigned int pitch = state->color_plane[color_plane].stride;
1219
1220         return y * pitch + x * cpp;
1221 }
1222
1223 /*
1224  * Add the x/y offsets derived from fb->offsets[] to the user
1225  * specified plane src x/y offsets. The resulting x/y offsets
1226  * specify the start of scanout from the beginning of the gtt mapping.
1227  */
1228 void intel_add_fb_offsets(int *x, int *y,
1229                           const struct intel_plane_state *state,
1230                           int color_plane)
1231
1232 {
1233         *x += state->color_plane[color_plane].x;
1234         *y += state->color_plane[color_plane].y;
1235 }
1236
/*
 * Move the difference between two tile-aligned byte offsets
 * (@old_offset - @new_offset) into the x/y pixel offsets, so the same
 * pixel is addressed relative to @new_offset. Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	/* Both offsets must be tile aligned, and we only move backwards. */
	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Convert whole tiles into row/column pixel offsets. */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
1263
1264 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
1265 {
1266         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
1267                is_gen12_ccs_plane(fb, color_plane);
1268 }
1269
/*
 * Re-express the x/y offsets relative to @new_offset instead of
 * @old_offset. Tiled surfaces go through per-tile adjustment, linear
 * surfaces use plain byte arithmetic on the pitch. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	/* We can only move the offset backwards. */
	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* In the rotated view the pitch runs along tile columns. */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
1308
1309 /*
1310  * Adjust the tile offset by moving the difference into
1311  * the x/y offsets.
1312  */
1313 u32 intel_plane_adjust_aligned_offset(int *x, int *y,
1314                                       const struct intel_plane_state *state,
1315                                       int color_plane,
1316                                       u32 old_offset, u32 new_offset)
1317 {
1318         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
1319                                            state->hw.rotation,
1320                                            state->color_plane[color_plane].stride,
1321                                            old_offset, new_offset);
1322 }
1323
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* In the rotated view the pitch runs along tile columns. */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles plus an intra-tile remainder. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* Fold the offset vs. offset_aligned delta back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
1392
1393 u32 intel_plane_compute_aligned_offset(int *x, int *y,
1394                                        const struct intel_plane_state *state,
1395                                        int color_plane)
1396 {
1397         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
1398         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
1399         const struct drm_framebuffer *fb = state->hw.fb;
1400         unsigned int rotation = state->hw.rotation;
1401         int pitch = state->color_plane[color_plane].stride;
1402         u32 alignment;
1403
1404         if (intel_plane->id == PLANE_CURSOR)
1405                 alignment = intel_cursor_alignment(dev_priv);
1406         else
1407                 alignment = intel_surf_alignment(fb, color_plane);
1408
1409         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
1410                                             pitch, rotation, alignment);
1411 }
1412
/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/*
	 * Required start-of-plane alignment: gen12+ semiplanar UV planes
	 * are tile row aligned, other tiled surfaces tile size aligned,
	 * linear surfaces unconstrained.
	 */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Express fb->offsets[] as an x/y offset from the fb origin. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
1460
1461 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1462 {
1463         switch (fb_modifier) {
1464         case I915_FORMAT_MOD_X_TILED:
1465                 return I915_TILING_X;
1466         case I915_FORMAT_MOD_Y_TILED:
1467         case I915_FORMAT_MOD_Y_TILED_CCS:
1468         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1469         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1470         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1471                 return I915_TILING_Y;
1472         default:
1473                 return I915_TILING_NONE;
1474         }
1475 }
1476
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/* SKL+ CCS formats: plane 0 is the main surface, plane 1 the CCS. */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
1501
/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
 * the main surface.
 */
/* Format descriptors used with the gen12 CCS modifiers. */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	/* Planar YUV formats carry 4 planes (num_planes = 4) when compressed. */
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
1547
/*
 * Same as gen12_ccs_formats[] above, but with an additional surface used
 * to pass Clear Color information in plane 2 with 64 bits of data.
 * Note plane 2 has .char_per_block = 0: it is consumed by the driver and
 * not laid out like the other planes.
 */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
1566
1567 static const struct drm_format_info *
1568 lookup_format_info(const struct drm_format_info formats[],
1569                    int num_formats, u32 format)
1570 {
1571         int i;
1572
1573         for (i = 0; i < num_formats; i++) {
1574                 if (formats[i].format == format)
1575                         return &formats[i];
1576         }
1577
1578         return NULL;
1579 }
1580
1581 static const struct drm_format_info *
1582 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1583 {
1584         switch (cmd->modifier[0]) {
1585         case I915_FORMAT_MOD_Y_TILED_CCS:
1586         case I915_FORMAT_MOD_Yf_TILED_CCS:
1587                 return lookup_format_info(skl_ccs_formats,
1588                                           ARRAY_SIZE(skl_ccs_formats),
1589                                           cmd->pixel_format);
1590         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1591         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1592                 return lookup_format_info(gen12_ccs_formats,
1593                                           ARRAY_SIZE(gen12_ccs_formats),
1594                                           cmd->pixel_format);
1595         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1596                 return lookup_format_info(gen12_ccs_cc_formats,
1597                                           ARRAY_SIZE(gen12_ccs_cc_formats),
1598                                           cmd->pixel_format);
1599         default:
1600                 return NULL;
1601         }
1602 }
1603
1604 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1605 {
1606         return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1607                             512) * 64;
1608 }
1609
1610 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1611                               u32 pixel_format, u64 modifier)
1612 {
1613         struct intel_crtc *crtc;
1614         struct intel_plane *plane;
1615
1616         /*
1617          * We assume the primary plane for pipe A has
1618          * the highest stride limits of them all,
1619          * if in case pipe A is disabled, use the first pipe from pipe_mask.
1620          */
1621         crtc = intel_get_first_crtc(dev_priv);
1622         if (!crtc)
1623                 return 0;
1624
1625         plane = to_intel_plane(crtc->base.primary);
1626
1627         return plane->max_stride(plane, pixel_format, modifier,
1628                                  DRM_MODE_ROTATE_0);
1629 }
1630
1631 static
1632 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1633                         u32 pixel_format, u64 modifier)
1634 {
1635         /*
1636          * Arbitrary limit for gen4+ chosen to match the
1637          * render engine max stride.
1638          *
1639          * The new CCS hash mode makes remapping impossible
1640          */
1641         if (!is_ccs_modifier(modifier)) {
1642                 if (INTEL_GEN(dev_priv) >= 7)
1643                         return 256*1024;
1644                 else if (INTEL_GEN(dev_priv) >= 4)
1645                         return 128*1024;
1646         }
1647
1648         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1649 }
1650
1651 static u32
1652 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1653 {
1654         struct drm_i915_private *dev_priv = to_i915(fb->dev);
1655         u32 tile_width;
1656
1657         if (is_surface_linear(fb, color_plane)) {
1658                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1659                                                            fb->format->format,
1660                                                            fb->modifier);
1661
1662                 /*
1663                  * To make remapping with linear generally feasible
1664                  * we need the stride to be page aligned.
1665                  */
1666                 if (fb->pitches[color_plane] > max_stride &&
1667                     !is_ccs_modifier(fb->modifier))
1668                         return intel_tile_size(dev_priv);
1669                 else
1670                         return 64;
1671         }
1672
1673         tile_width = intel_tile_width_bytes(fb, color_plane);
1674         if (is_ccs_modifier(fb->modifier)) {
1675                 /*
1676                  * Display WA #0531: skl,bxt,kbl,glk
1677                  *
1678                  * Render decompression and plane width > 3840
1679                  * combined with horizontal panning requires the
1680                  * plane stride to be a multiple of 4. We'll just
1681                  * require the entire fb to accommodate that to avoid
1682                  * potential runtime errors at plane configuration time.
1683                  */
1684                 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
1685                         tile_width *= 4;
1686                 /*
1687                  * The main surface pitch must be padded to a multiple of four
1688                  * tile widths.
1689                  */
1690                 else if (INTEL_GEN(dev_priv) >= 12)
1691                         tile_width *= 4;
1692         }
1693         return tile_width;
1694 }
1695
1696 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
1697 {
1698         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1699         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1700         const struct drm_framebuffer *fb = plane_state->hw.fb;
1701         int i;
1702
1703         /* We don't want to deal with remapping with cursors */
1704         if (plane->id == PLANE_CURSOR)
1705                 return false;
1706
1707         /*
1708          * The display engine limits already match/exceed the
1709          * render engine limits, so not much point in remapping.
1710          * Would also need to deal with the fence POT alignment
1711          * and gen2 2KiB GTT tile size.
1712          */
1713         if (INTEL_GEN(dev_priv) < 4)
1714                 return false;
1715
1716         /*
1717          * The new CCS hash mode isn't compatible with remapping as
1718          * the virtual address of the pages affects the compressed data.
1719          */
1720         if (is_ccs_modifier(fb->modifier))
1721                 return false;
1722
1723         /* Linear needs a page aligned stride for remapping */
1724         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
1725                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
1726
1727                 for (i = 0; i < fb->format->num_planes; i++) {
1728                         if (fb->pitches[i] & alignment)
1729                                 return false;
1730                 }
1731         }
1732
1733         return true;
1734 }
1735
1736 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
1737 {
1738         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1739         const struct drm_framebuffer *fb = plane_state->hw.fb;
1740         unsigned int rotation = plane_state->hw.rotation;
1741         u32 stride, max_stride;
1742
1743         /*
1744          * No remapping for invisible planes since we don't have
1745          * an actual source viewport to remap.
1746          */
1747         if (!plane_state->uapi.visible)
1748                 return false;
1749
1750         if (!intel_plane_can_remap(plane_state))
1751                 return false;
1752
1753         /*
1754          * FIXME: aux plane limits on gen9+ are
1755          * unclear in Bspec, for now no checking.
1756          */
1757         stride = intel_fb_pitch(fb, 0, rotation);
1758         max_stride = plane->max_stride(plane, fb->format->format,
1759                                        fb->modifier, rotation);
1760
1761         return stride > max_stride;
1762 }
1763
/*
 * Return in *hsub/*vsub the horizontal/vertical subsampling factors of
 * @color_plane of @fb.
 */
void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
                               const struct drm_framebuffer *fb,
                               int color_plane)
{
        int main_plane;

        /* The first plane is never subsampled. */
        if (color_plane == 0) {
                *hsub = 1;
                *vsub = 1;

                return;
        }

        /*
         * TODO: Deduct the subsampling from the char block for all CCS
         * formats and planes.
         */
        if (!is_gen12_ccs_plane(fb, color_plane)) {
                /* Non-gen12-CCS planes use the generic format subsampling. */
                *hsub = fb->format->hsub;
                *vsub = fb->format->vsub;

                return;
        }

        /*
         * gen12 CCS plane: derive the horizontal subsampling from the
         * block width ratio between the CCS plane and its main plane
         * (see gen12_ccs_formats[]).
         */
        main_plane = skl_ccs_to_main_plane(fb, color_plane);
        *hsub = drm_format_info_block_width(fb->format, color_plane) /
                drm_format_info_block_width(fb->format, main_plane);

        /*
         * The min stride check in the core framebuffer_check() function
         * assumes that format->hsub applies to every plane except for the
         * first plane. That's incorrect for the CCS AUX plane of the first
         * plane, but for the above check to pass we must define the block
         * width with that subsampling applied to it. Adjust the width here
         * accordingly, so we can calculate the actual subsampling factor.
         */
        if (main_plane == 0)
                *hsub *= fb->format->hsub;

        /*
         * NOTE(review): vertical subsampling is fixed at 32, matching the
         * "each CCS byte covers 32 main surface lines" layout described
         * above gen12_ccs_formats[] — confirm against Bspec if changed.
         */
        *vsub = 32;
}
/*
 * Validate that the intra-tile x/y offsets of CCS AUX plane @ccs_plane
 * (at @x, @y) match those of its main surface plane. Returns 0 on
 * success or -EINVAL on mismatch.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
        struct drm_i915_private *i915 = to_i915(fb->dev);
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        int main_plane;
        int hsub, vsub;
        int tile_width, tile_height;
        int ccs_x, ccs_y;
        int main_x, main_y;

        /* Only laid-out CCS planes need this check; clear-color planes don't. */
        if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
                return 0;

        intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
        intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

        /* Scale the CCS tile dimensions to main surface units. */
        tile_width *= hsub;
        tile_height *= vsub;

        /* Intra-tile offsets of the CCS plane, in main surface units. */
        ccs_x = (x * hsub) % tile_width;
        ccs_y = (y * vsub) % tile_height;

        main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
        main_x = intel_fb->normal[main_plane].x % tile_width;
        main_y = intel_fb->normal[main_plane].y % tile_height;

        /*
         * CCS doesn't have its own x/y offset register, so the intra CCS tile
         * x/y offsets must match between CCS and the main surface.
         */
        if (main_x != ccs_x || main_y != ccs_y) {
                drm_dbg_kms(&i915->drm,
                              "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                              main_x, main_y,
                              ccs_x, ccs_y,
                              intel_fb->normal[main_plane].x,
                              intel_fb->normal[main_plane].y,
                              x, y);
                return -EINVAL;
        }

        return 0;
}
1850
1851 static void
1852 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
1853 {
1854         int main_plane = is_ccs_plane(fb, color_plane) ?
1855                          skl_ccs_to_main_plane(fb, color_plane) : 0;
1856         int main_hsub, main_vsub;
1857         int hsub, vsub;
1858
1859         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
1860         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
1861         *w = fb->width / main_hsub / hsub;
1862         *h = fb->height / main_vsub / vsub;
1863 }
1864
/*
 * Setup the rotated view for an FB plane and return the size the GTT mapping
 * requires for this view (in tiles, i.e. plane width * height of the
 * rotated plane info). Returns 0 without touching the rotation info for
 * modifiers that don't support 90/270 rotation.
 */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
                  u32 gtt_offset_rotated, int x, int y,
                  unsigned int width, unsigned int height,
                  unsigned int tile_size,
                  unsigned int tile_width, unsigned int tile_height,
                  struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        unsigned int pitch_tiles;
        struct drm_rect r;

        /* Y or Yf modifiers required for 90/270 rotation */
        if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
            fb->modifier != I915_FORMAT_MOD_Yf_TILED)
                return 0;

        if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
                return 0;

        rot_info->plane[plane] = *plane_info;

        /* Pitch of the rotated view, in bytes. */
        intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

        /* rotate the x/y offsets to match the GTT view */
        drm_rect_init(&r, x, y, width, height);
        drm_rect_rotate(&r,
                        plane_info->width * tile_width,
                        plane_info->height * tile_height,
                        DRM_MODE_ROTATE_270);
        x = r.x1;
        y = r.y1;

        /* rotate the tile dimensions to match the GTT view */
        pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
        swap(tile_width, tile_height);

        /*
         * We only keep the x/y offsets, so push all of the
         * gtt offset into the x/y offsets.
         */
        intel_adjust_tile_offset(&x, &y,
                                 tile_width, tile_height,
                                 tile_size, pitch_tiles,
                                 gtt_offset_rotated * tile_size, 0);

        /*
         * First pixel of the framebuffer from
         * the start of the rotated gtt mapping.
         */
        intel_fb->rotated[plane].x = x;
        intel_fb->rotated[plane].y = y;

        return plane_info->width * plane_info->height;
}
1925
/*
 * Compute the per-plane normal-view x/y offsets of @fb and set up the
 * rotated view for tiled planes, validating along the way that the fb
 * layout fits within the backing object. Returns 0 on success or a
 * negative error code on a layout/size violation.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                /*
                 * Plane 2 of Render Compression with Clear Color fb modifier
                 * is consumed by the driver and not passed to DE. Skip the
                 * arithmetic related to alignment and offset calculation.
                 */
                if (is_gen12_ccs_cc_plane(fb, i)) {
                        if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
                                continue;
                        else
                                return -EINVAL;
                }

                cpp = fb->format->cpp[i];
                intel_fb_plane_dims(&width, &height, fb, i);

                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "bad fb plane %d offset: 0x%x\n",
                                    i, fb->offsets[i]);
                        return ret;
                }

                ret = intel_fb_check_ccs_xy(fb, i, x, y);
                if (ret)
                        return ret;

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "bad fb plane %d offset: 0x%x\n",
                                     i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                /* offset is in tile units from here on */
                offset /= tile_size;

                if (!is_surface_linear(fb, i)) {
                        struct intel_remapped_plane_info plane_info;
                        unsigned int tile_width, tile_height;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        /* Plane geometry in whole tiles. */
                        plane_info.offset = offset;
                        plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
                                                         tile_width * cpp);
                        plane_info.width = DIV_ROUND_UP(x + width, tile_width);
                        plane_info.height = DIV_ROUND_UP(y + height,
                                                         tile_height);

                        /* how many tiles does this plane need */
                        size = plane_info.stride * plane_info.height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        gtt_offset_rotated +=
                                setup_fb_rotation(i, &plane_info,
                                                  gtt_offset_rotated,
                                                  x, y, width, height,
                                                  tile_size,
                                                  tile_width, tile_height,
                                                  fb);
                } else {
                        /* Linear plane: size in tile-size chunks. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                drm_dbg_kms(&dev_priv->drm,
                            "fb too big for bo (need %llu bytes, have %zu bytes)\n",
                            mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2048
/*
 * Build a remapped/rotated GGTT view for @plane_state and rewrite its
 * color_plane[] stride/x/y so scanout goes through that view. Only
 * called for non-CCS fbs (see the WARN below) when
 * intel_plane_needs_remap() says the normal view can't be used.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *info = &plane_state->view.rotated;
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);
        unsigned int src_x, src_y;
        unsigned int src_w, src_h;
        u32 gtt_offset = 0;

        memset(&plane_state->view, 0, sizeof(plane_state->view));
        plane_state->view.type = drm_rotation_90_or_270(rotation) ?
                I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

        /* src coordinates are 16.16 fixed point */
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;
        src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
        src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

        drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

        /* Make src coordinates relative to the viewport */
        drm_rect_translate(&plane_state->uapi.src,
                           -(src_x << 16), -(src_y << 16));

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                src_w << 16, src_h << 16,
                                DRM_MODE_ROTATE_270);

        for (i = 0; i < num_planes; i++) {
                unsigned int hsub = i ? fb->format->hsub : 1;
                unsigned int vsub = i ? fb->format->vsub : 1;
                unsigned int cpp = fb->format->cpp[i];
                unsigned int tile_width, tile_height;
                unsigned int width, height;
                unsigned int pitch_tiles;
                unsigned int x, y;
                u32 offset;

                intel_tile_dims(fb, i, &tile_width, &tile_height);

                /* Viewport origin/size in this plane's units. */
                x = src_x / hsub;
                y = src_y / vsub;
                width = src_w / hsub;
                height = src_h / vsub;

                /*
                 * First pixel of the src viewport from the
                 * start of the normal gtt mapping.
                 */
                x += intel_fb->normal[i].x;
                y += intel_fb->normal[i].y;

                offset = intel_compute_aligned_offset(dev_priv, &x, &y,
                                                      fb, i, fb->pitches[i],
                                                      DRM_MODE_ROTATE_0, tile_size);
                /* offset is in tile units from here on */
                offset /= tile_size;

                drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
                info->plane[i].offset = offset;
                info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
                                                     tile_width * cpp);
                info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        struct drm_rect r;

                        /* rotate the x/y offsets to match the GTT view */
                        drm_rect_init(&r, x, y, width, height);
                        drm_rect_rotate(&r,
                                        info->plane[i].width * tile_width,
                                        info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        pitch_tiles = info->plane[i].height;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_height;

                        /* rotate the tile dimensions to match the GTT view */
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = info->plane[i].width;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
                }

                /*
                 * We only keep the x/y offsets, so push all of the
                 * gtt offset into the x/y offsets.
                 */
                intel_adjust_tile_offset(&x, &y,
                                         tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         gtt_offset * tile_size, 0);

                /* Next plane starts after this one in the remapped view. */
                gtt_offset += info->plane[i].width * info->plane[i].height;

                plane_state->color_plane[i].offset = 0;
                plane_state->color_plane[i].x = x;
                plane_state->color_plane[i].y = y;
        }
}
2159
2160 int
2161 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2162 {
2163         const struct intel_framebuffer *fb =
2164                 to_intel_framebuffer(plane_state->hw.fb);
2165         unsigned int rotation = plane_state->hw.rotation;
2166         int i, num_planes;
2167
2168         if (!fb)
2169                 return 0;
2170
2171         num_planes = fb->base.format->num_planes;
2172
2173         if (intel_plane_needs_remap(plane_state)) {
2174                 intel_plane_remap_gtt(plane_state);
2175
2176                 /*
2177                  * Sometimes even remapping can't overcome
2178                  * the stride limitations :( Can happen with
2179                  * big plane sizes and suitably misaligned
2180                  * offsets.
2181                  */
2182                 return intel_plane_check_stride(plane_state);
2183         }
2184
2185         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2186
2187         for (i = 0; i < num_planes; i++) {
2188                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2189                 plane_state->color_plane[i].offset = 0;
2190
2191                 if (drm_rotation_90_or_270(rotation)) {
2192                         plane_state->color_plane[i].x = fb->rotated[i].x;
2193                         plane_state->color_plane[i].y = fb->rotated[i].y;
2194                 } else {
2195                         plane_state->color_plane[i].x = fb->normal[i].x;
2196                         plane_state->color_plane[i].y = fb->normal[i].y;
2197                 }
2198         }
2199
2200         /* Rotate src coordinates to match rotated GTT view */
2201         if (drm_rotation_90_or_270(rotation))
2202                 drm_rect_rotate(&plane_state->uapi.src,
2203                                 fb->base.width << 16, fb->base.height << 16,
2204                                 DRM_MODE_ROTATE_270);
2205
2206         return intel_plane_check_stride(plane_state);
2207 }
2208
/*
 * Wrap the BIOS/GOP-programmed preallocated stolen memory range
 * described by @plane_config in a GEM object and pin it into the GGTT
 * at its current offset, so the firmware framebuffer can be taken over
 * without a visible glitch. Returns the pinned vma or NULL on any
 * failure (callers fall back to allocating a fresh fb).
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
                  struct intel_initial_plane_config *plane_config)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 base, size;

        if (plane_config->size == 0)
                return NULL;

        /* Expand [base, base + size) to GTT minimum alignment boundaries. */
        base = round_down(plane_config->base,
                          I915_GTT_MIN_ALIGNMENT);
        size = round_up(plane_config->base + plane_config->size,
                        I915_GTT_MIN_ALIGNMENT);
        size -= base;

        /*
         * If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features.
         */
        if (size * 2 > i915->stolen_usable_size)
                return NULL;

        obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
        if (IS_ERR(obj))
                return NULL;

        /*
         * Mark it WT ahead of time to avoid changing the
         * cache_level during fbdev initialization. The
         * unbind there would get stuck waiting for rcu.
         */
        i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
                                            I915_CACHE_WT : I915_CACHE_NONE);

        /* Carry over the firmware-programmed tiling/stride to the object. */
        switch (plane_config->tiling) {
        case I915_TILING_NONE:
                break;
        case I915_TILING_X:
        case I915_TILING_Y:
                obj->tiling_and_stride =
                        plane_config->fb->base.pitches[0] |
                        plane_config->tiling;
                break;
        default:
                MISSING_CASE(plane_config->tiling);
                goto err_obj;
        }

        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err_obj;

        /* Must land exactly where the firmware placed it. */
        if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
                goto err_obj;

        /* Tiled scanout needs a mappable, fenceable vma. */
        if (i915_gem_object_is_tiled(obj) &&
            !i915_vma_is_map_and_fenceable(vma))
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return NULL;
}
2277
2278 static bool
2279 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2280                               struct intel_initial_plane_config *plane_config)
2281 {
2282         struct drm_device *dev = crtc->base.dev;
2283         struct drm_i915_private *dev_priv = to_i915(dev);
2284         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2285         struct drm_framebuffer *fb = &plane_config->fb->base;
2286         struct i915_vma *vma;
2287
2288         switch (fb->modifier) {
2289         case DRM_FORMAT_MOD_LINEAR:
2290         case I915_FORMAT_MOD_X_TILED:
2291         case I915_FORMAT_MOD_Y_TILED:
2292                 break;
2293         default:
2294                 drm_dbg(&dev_priv->drm,
2295                         "Unsupported modifier for initial FB: 0x%llx\n",
2296                         fb->modifier);
2297                 return false;
2298         }
2299
2300         vma = initial_plane_vma(dev_priv, plane_config);
2301         if (!vma)
2302                 return false;
2303
2304         mode_cmd.pixel_format = fb->format->format;
2305         mode_cmd.width = fb->width;
2306         mode_cmd.height = fb->height;
2307         mode_cmd.pitches[0] = fb->pitches[0];
2308         mode_cmd.modifier[0] = fb->modifier;
2309         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2310
2311         if (intel_framebuffer_init(to_intel_framebuffer(fb),
2312                                    vma->obj, &mode_cmd)) {
2313                 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
2314                 goto err_vma;
2315         }
2316
2317         plane_config->vma = vma;
2318         return true;
2319
2320 err_vma:
2321         i915_vma_put(vma);
2322         return false;
2323 }
2324
2325 static void
2326 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2327                         struct intel_plane_state *plane_state,
2328                         bool visible)
2329 {
2330         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2331
2332         plane_state->uapi.visible = visible;
2333
2334         if (visible)
2335                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
2336         else
2337                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
2338 }
2339
2340 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
2341 {
2342         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2343         struct drm_plane *plane;
2344
2345         /*
2346          * Active_planes aliases if multiple "primary" or cursor planes
2347          * have been used on the same (or wrong) pipe. plane_mask uses
2348          * unique ids, hence we can use that to reconstruct active_planes.
2349          */
2350         crtc_state->enabled_planes = 0;
2351         crtc_state->active_planes = 0;
2352
2353         drm_for_each_plane_mask(plane, &dev_priv->drm,
2354                                 crtc_state->uapi.plane_mask) {
2355                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
2356                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2357         }
2358 }
2359
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * fixing up the software state (visibility, plane bitmasks, data rate,
 * min cdclk) to match. Used to sanitize leftover BIOS/firmware plane
 * state that we can't or don't want to take over.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* NOTE(review): presumably IPS can't stay on without the primary plane */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
	/* Wait for the disable to actually latch at the next vblank */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
2405
/*
 * Take over the framebuffer the BIOS/GOP left on the primary plane:
 * either wrap the pre-allocated stolen memory into a new fb, reuse
 * another CRTC's fb when both scan out of the same address, or, failing
 * both, disable the plane so we never scan out of a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(intel_crtc->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	/* Nothing to take over if the BIOS left no fb behind */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc_state(c->state)->uapi.active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT offset -> the BIOS pointed both pipes at one fb */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);
	if (crtc_state->bigjoiner) {
		/* The bigjoiner slave pipe's primary plane goes down too */
		struct intel_crtc *slave =
			crtc_state->bigjoiner_linked_crtc;
		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
	}

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Take extra pin/vma references for the plane state's ownership */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-plane scanout of the whole fb; src coords are 16.16 fixed point */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	/* Keep the BIOS swizzle so the inherited fb stays readable */
	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
					  intel_crtc);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
2513
2514 unsigned int
2515 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2516 {
2517         int x = 0, y = 0;
2518
2519         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2520                                           plane_state->color_plane[0].offset, 0);
2521
2522         return y;
2523 }
2524
/*
 * Re-read the display hardware state and, if @state (a pre-reset
 * duplicate) is given, commit it back. Returns 0 on success or the
 * commit's negative error code; -EDEADLK here would indicate a
 * locking bug, hence the WARN.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* Nothing to restore (e.g. no duplicated pre-reset state) */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
2563
2564 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2565 {
2566         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2567                 intel_has_gpu_reset(&dev_priv->gt));
2568 }
2569
/*
 * Prepare the display for a GPU reset: if the reset will clobber the
 * display (or the force-modeset test param is set), take the modeset
 * locks, duplicate the current atomic state for later restore, and
 * disable all CRTCs. intel_display_finish_reset() undoes this.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until we stop hitting -EDEADLK */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_display_finish_reset() */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
2632
/*
 * Counterpart of intel_display_prepare_reset(): restore the display
 * state duplicated before the GPU reset (re-initializing display HW
 * first if the reset clobbered it), then drop the modeset locks and
 * clear I915_RESET_MODESET.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	/* Let anyone waiting on I915_RESET_MODESET proceed */
	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
2684
2685 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
2686 {
2687         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2688         enum pipe pipe = crtc->pipe;
2689         u32 tmp;
2690
2691         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2692
2693         /*
2694          * Display WA #1153: icl
2695          * enable hardware to bypass the alpha math
2696          * and rounding for per-pixel values 00 and 0xff
2697          */
2698         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2699         /*
2700          * Display WA # 1605353570: icl
2701          * Set the pixel rounding bit to 1 for allowing
2702          * passthrough of Frame buffer pixels unmodified
2703          * across pipe
2704          */
2705         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2706         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2707 }
2708
/*
 * Check whether any CRTC still has an atomic commit whose cleanup
 * phase hasn't completed. If one is found, wait a vblank on that CRTC
 * to give it a chance to finish and return true; false if everything
 * is clean.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		/* commit_lock protects the per-crtc commit list */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		/* No commit at all counts as "cleanup done" */
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}
2733
/*
 * Disable the LPT iCLKIP clock: gate the pixel clock first, then set
 * the SSC disable bit over the sideband interface (sideband access is
 * serialized by sb_lock).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
2748
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Always start from the clock fully disabled */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	/* All SBI writes below must be serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Finally ungate the pixel clock */
	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
2824
/*
 * Read back the currently programmed iCLKIP frequency in kHz (the
 * inverse of lpt_program_iclkip()). Returns 0 if the pixel clock is
 * gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	/* Sideband reads must be serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reconstruct the divisor lpt_program_iclkip() decomposed */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
2861
/*
 * Copy the CPU transcoder's h/v timing registers over to the given PCH
 * transcoder so both run with identical timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Horizontal timings */
	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	/* Vertical timings */
	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
2885
/*
 * Select whether the FDI B/C lanes are bifurcated (split between pipes
 * B and C). No-op when already in the requested state; the WARNs check
 * that FDI RX B and C are both disabled before the chicken bit flips.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	/* Already in the requested state? */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
2910
2911 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2912 {
2913         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2914         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2915
2916         switch (crtc->pipe) {
2917         case PIPE_A:
2918                 break;
2919         case PIPE_B:
2920                 if (crtc_state->fdi_lanes > 2)
2921                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
2922                 else
2923                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
2924
2925                 break;
2926         case PIPE_C:
2927                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
2928
2929                 break;
2930         default:
2931                 BUG();
2932         }
2933 }
2934
2935 /*
2936  * Finds the encoder associated with the given CRTC. This can only be
2937  * used when we know that the CRTC isn't feeding multiple encoders!
2938  */
2939 struct intel_encoder *
2940 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2941                            const struct intel_crtc_state *crtc_state)
2942 {
2943         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2944         const struct drm_connector_state *connector_state;
2945         const struct drm_connector *connector;
2946         struct intel_encoder *encoder = NULL;
2947         int num_encoders = 0;
2948         int i;
2949
2950         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2951                 if (connector_state->crtc != &crtc->base)
2952                         continue;
2953
2954                 encoder = to_intel_encoder(connector_state->best_encoder);
2955                 num_encoders++;
2956         }
2957
2958         drm_WARN(encoder->base.dev, num_encoders != 1,
2959                  "%d encoders for pipe %c\n",
2960                  num_encoders, pipe_name(crtc->pipe));
2961
2962         return encoder;
2963 }
2964
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB needs the B/C FDI lane bifurcation set up before training */
	if (IS_IVYBRIDGE(dev_priv))
		ivb_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = intel_de_read(dev_priv, reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the single DP port feeding it */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		intel_de_write(dev_priv, reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}
3056
/*
 * Enable the LPT PCH transcoder for the given crtc state: program the
 * iCLKIP clock, copy the CPU transcoder timings over, and enable the
 * transcoder.  The PCH transcoder is always addressed as PIPE_A here.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* The transcoder must be off before we reprogram its clock/timings. */
	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
3072
/*
 * Sanity check after a modeset on CPT: sample the pipe's scanline
 * counter (PIPEDSL) and verify that it is advancing.  If the counter
 * does not change within two 5 ms waits, the pipe never started
 * scanning out and we log an error.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
3088
3089 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
3090 {
3091         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3092         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3093         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
3094         enum pipe pipe = crtc->pipe;
3095         int width = drm_rect_width(dst);
3096         int height = drm_rect_height(dst);
3097         int x = dst->x1;
3098         int y = dst->y1;
3099
3100         if (!crtc_state->pch_pfit.enabled)
3101                 return;
3102
3103         /* Force use of hard-coded filter coefficients
3104          * as some pre-programmed values are broken,
3105          * e.g. x201.
3106          */
3107         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3108                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3109                                PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
3110         else
3111                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
3112                                PF_FILTER_MED_3x3);
3113         intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
3114         intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
3115 }
3116
/*
 * Enable IPS for the given crtc state.
 *
 * On Broadwell the enable goes through the pcode mailbox; elsewhere
 * (Haswell) IPS_CTL is written directly and we wait for the enable bit
 * to become visible.  Must only be called after a plane is enabled and
 * a vblank has passed (enforced by the WARN below).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
3153
/*
 * Disable IPS for the given crtc state.
 *
 * On Broadwell the disable goes through the pcode mailbox and we wait
 * for IPS_CTL to clear; elsewhere IPS_CTL is cleared directly.  In
 * both cases we then wait for a vblank before the caller may disable
 * the plane.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
3182
3183 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
3184 {
3185         if (intel_crtc->overlay)
3186                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3187
3188         /* Let userspace switch the overlay on again. In most cases userspace
3189          * has to recompute where to put it anyway.
3190          */
3191 }
3192
3193 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
3194                                        const struct intel_crtc_state *new_crtc_state)
3195 {
3196         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3197         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3198
3199         if (!old_crtc_state->ips_enabled)
3200                 return false;
3201
3202         if (intel_crtc_needs_modeset(new_crtc_state))
3203                 return true;
3204
3205         /*
3206          * Workaround : Do not read or write the pipe palette/gamma data while
3207          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3208          *
3209          * Disable IPS before we program the LUT.
3210          */
3211         if (IS_HASWELL(dev_priv) &&
3212             (new_crtc_state->uapi.color_mgmt_changed ||
3213              new_crtc_state->update_pipe) &&
3214             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3215                 return true;
3216
3217         return !new_crtc_state->ips_enabled;
3218 }
3219
3220 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
3221                                        const struct intel_crtc_state *new_crtc_state)
3222 {
3223         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3224         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3225
3226         if (!new_crtc_state->ips_enabled)
3227                 return false;
3228
3229         if (intel_crtc_needs_modeset(new_crtc_state))
3230                 return true;
3231
3232         /*
3233          * Workaround : Do not read or write the pipe palette/gamma data while
3234          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3235          *
3236          * Re-enable IPS after the LUT has been programmed.
3237          */
3238         if (IS_HASWELL(dev_priv) &&
3239             (new_crtc_state->uapi.color_mgmt_changed ||
3240              new_crtc_state->update_pipe) &&
3241             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3242                 return true;
3243
3244         /*
3245          * We can't read out IPS on broadwell, assume the worst and
3246          * forcibly enable IPS on the first fastset.
3247          */
3248         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
3249                 return true;
3250
3251         return !old_crtc_state->ips_enabled;
3252 }
3253
3254 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
3255 {
3256         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3257
3258         if (!crtc_state->nv12_planes)
3259                 return false;
3260
3261         /* WA Display #0827: Gen9:all */
3262         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
3263                 return true;
3264
3265         return false;
3266 }
3267
3268 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
3269 {
3270         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3271
3272         /* Wa_2006604312:icl,ehl */
3273         if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
3274                 return true;
3275
3276         return false;
3277 }
3278
3279 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
3280                             const struct intel_crtc_state *new_crtc_state)
3281 {
3282         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
3283                 new_crtc_state->active_planes;
3284 }
3285
3286 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
3287                              const struct intel_crtc_state *new_crtc_state)
3288 {
3289         return old_crtc_state->active_planes &&
3290                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
3291 }
3292
/*
 * Per-crtc tasks to run after the planes have been updated: flush the
 * frontbuffer bits, update watermarks, re-enable IPS where required,
 * finish the FBC update and undo the NV12 (#0827) and scaler clock
 * gating (Wa_2006604312) workarounds once they are no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Disable the workarounds only on a needed -> not-needed transition. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
3321
3322 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
3323                                         struct intel_crtc *crtc)
3324 {
3325         const struct intel_crtc_state *crtc_state =
3326                 intel_atomic_get_new_crtc_state(state, crtc);
3327         u8 update_planes = crtc_state->update_planes;
3328         const struct intel_plane_state *plane_state;
3329         struct intel_plane *plane;
3330         int i;
3331
3332         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
3333                 if (plane->enable_flip_done &&
3334                     plane->pipe == crtc->pipe &&
3335                     update_planes & BIT(plane->id))
3336                         plane->enable_flip_done(plane);
3337         }
3338 }
3339
3340 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
3341                                          struct intel_crtc *crtc)
3342 {
3343         const struct intel_crtc_state *crtc_state =
3344                 intel_atomic_get_new_crtc_state(state, crtc);
3345         u8 update_planes = crtc_state->update_planes;
3346         const struct intel_plane_state *plane_state;
3347         struct intel_plane *plane;
3348         int i;
3349
3350         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
3351                 if (plane->disable_flip_done &&
3352                     plane->pipe == crtc->pipe &&
3353                     update_planes & BIT(plane->id))
3354                         plane->disable_flip_done(plane);
3355         }
3356 }
3357
/*
 * WA for planes whose async flip enable bit is double buffered and
 * only latched at the start of vblank: re-program each affected plane
 * with its old state but with async flip disabled, then wait for a
 * vblank so the disable has actually latched before the real plane
 * update is committed.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	/* Only wait once even if multiple planes needed the WA. */
	if (need_vbl_wait)
		intel_wait_for_vblank(i915, crtc->pipe);
}
3389
/*
 * Per-crtc tasks to run before the planes are updated: disable IPS if
 * needed, run the FBC pre-update, enable the NV12 (#0827) and scaler
 * clock gating (Wa_2006604312) workarounds, disable self-refresh and
 * LP watermarks where the update requires it (with the vblank waits
 * the hardware needs to latch those changes), program intermediate
 * watermarks for non-modeset updates, suppress gen2 underrun noise,
 * and apply the async-flip disable workaround.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank wait before continuing. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
3483
3484 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
3485                                       struct intel_crtc *crtc)
3486 {
3487         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3488         const struct intel_crtc_state *new_crtc_state =
3489                 intel_atomic_get_new_crtc_state(state, crtc);
3490         unsigned int update_mask = new_crtc_state->update_planes;
3491         const struct intel_plane_state *old_plane_state;
3492         struct intel_plane *plane;
3493         unsigned fb_bits = 0;
3494         int i;
3495
3496         intel_crtc_dpms_overlay_disable(crtc);
3497
3498         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
3499                 if (crtc->pipe != plane->pipe ||
3500                     !(update_mask & BIT(plane->id)))
3501                         continue;
3502
3503                 intel_disable_plane(plane, new_crtc_state);
3504
3505                 if (old_plane_state->uapi.visible)
3506                         fb_bits |= plane->frontbuffer_bit;
3507         }
3508
3509         intel_frontbuffer_flip(dev_priv, fb_bits);
3510 }
3511
3512 /*
3513  * intel_connector_primary_encoder - get the primary encoder for a connector
3514  * @connector: connector for which to return the encoder
3515  *
3516  * Returns the primary encoder for a connector. There is a 1:1 mapping from
3517  * all connectors to their encoder, except for DP-MST connectors which have
3518  * both a virtual and a primary encoder. These DP-MST primary encoders can be
3519  * pointed to by as many DP-MST connectors as there are pipes.
3520  */
3521 static struct intel_encoder *
3522 intel_connector_primary_encoder(struct intel_connector *connector)
3523 {
3524         struct intel_encoder *encoder;
3525
3526         if (connector->mst_port)
3527                 return &dp_to_dig_port(connector->mst_port)->base;
3528
3529         encoder = intel_attached_encoder(connector);
3530         drm_WARN_ON(connector->base.dev, !encoder);
3531
3532         return encoder;
3533 }
3534
3535 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
3536 {
3537         struct drm_connector_state *new_conn_state;
3538         struct drm_connector *connector;
3539         int i;
3540
3541         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3542                                         i) {
3543                 struct intel_connector *intel_connector;
3544                 struct intel_encoder *encoder;
3545                 struct intel_crtc *crtc;
3546
3547                 if (!intel_connector_needs_modeset(state, connector))
3548                         continue;
3549
3550                 intel_connector = to_intel_connector(connector);
3551                 encoder = intel_connector_primary_encoder(intel_connector);
3552                 if (!encoder->update_prepare)
3553                         continue;
3554
3555                 crtc = new_conn_state->crtc ?
3556                         to_intel_crtc(new_conn_state->crtc) : NULL;
3557                 encoder->update_prepare(state, encoder, crtc);
3558         }
3559 }
3560
3561 static void intel_encoders_update_complete(struct intel_atomic_state *state)
3562 {
3563         struct drm_connector_state *new_conn_state;
3564         struct drm_connector *connector;
3565         int i;
3566
3567         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
3568                                         i) {
3569                 struct intel_connector *intel_connector;
3570                 struct intel_encoder *encoder;
3571                 struct intel_crtc *crtc;
3572
3573                 if (!intel_connector_needs_modeset(state, connector))
3574                         continue;
3575
3576                 intel_connector = to_intel_connector(connector);
3577                 encoder = intel_connector_primary_encoder(intel_connector);
3578                 if (!encoder->update_complete)
3579                         continue;
3580
3581                 crtc = new_conn_state->crtc ?
3582                         to_intel_crtc(new_conn_state->crtc) : NULL;
3583                 encoder->update_complete(state, encoder, crtc);
3584         }
3585 }
3586
3587 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
3588                                           struct intel_crtc *crtc)
3589 {
3590         const struct intel_crtc_state *crtc_state =
3591                 intel_atomic_get_new_crtc_state(state, crtc);
3592         const struct drm_connector_state *conn_state;
3593         struct drm_connector *conn;
3594         int i;
3595
3596         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3597                 struct intel_encoder *encoder =
3598                         to_intel_encoder(conn_state->best_encoder);
3599
3600                 if (conn_state->crtc != &crtc->base)
3601                         continue;
3602
3603                 if (encoder->pre_pll_enable)
3604                         encoder->pre_pll_enable(state, encoder,
3605                                                 crtc_state, conn_state);
3606         }
3607 }
3608
3609 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
3610                                       struct intel_crtc *crtc)
3611 {
3612         const struct intel_crtc_state *crtc_state =
3613                 intel_atomic_get_new_crtc_state(state, crtc);
3614         const struct drm_connector_state *conn_state;
3615         struct drm_connector *conn;
3616         int i;
3617
3618         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3619                 struct intel_encoder *encoder =
3620                         to_intel_encoder(conn_state->best_encoder);
3621
3622                 if (conn_state->crtc != &crtc->base)
3623                         continue;
3624
3625                 if (encoder->pre_enable)
3626                         encoder->pre_enable(state, encoder,
3627                                             crtc_state, conn_state);
3628         }
3629 }
3630
3631 static void intel_encoders_enable(struct intel_atomic_state *state,
3632                                   struct intel_crtc *crtc)
3633 {
3634         const struct intel_crtc_state *crtc_state =
3635                 intel_atomic_get_new_crtc_state(state, crtc);
3636         const struct drm_connector_state *conn_state;
3637         struct drm_connector *conn;
3638         int i;
3639
3640         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3641                 struct intel_encoder *encoder =
3642                         to_intel_encoder(conn_state->best_encoder);
3643
3644                 if (conn_state->crtc != &crtc->base)
3645                         continue;
3646
3647                 if (encoder->enable)
3648                         encoder->enable(state, encoder,
3649                                         crtc_state, conn_state);
3650                 intel_opregion_notify_encoder(encoder, true);
3651         }
3652 }
3653
3654 static void intel_encoders_disable(struct intel_atomic_state *state,
3655                                    struct intel_crtc *crtc)
3656 {
3657         const struct intel_crtc_state *old_crtc_state =
3658                 intel_atomic_get_old_crtc_state(state, crtc);
3659         const struct drm_connector_state *old_conn_state;
3660         struct drm_connector *conn;
3661         int i;
3662
3663         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3664                 struct intel_encoder *encoder =
3665                         to_intel_encoder(old_conn_state->best_encoder);
3666
3667                 if (old_conn_state->crtc != &crtc->base)
3668                         continue;
3669
3670                 intel_opregion_notify_encoder(encoder, false);
3671                 if (encoder->disable)
3672                         encoder->disable(state, encoder,
3673                                          old_crtc_state, old_conn_state);
3674         }
3675 }
3676
3677 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3678                                         struct intel_crtc *crtc)
3679 {
3680         const struct intel_crtc_state *old_crtc_state =
3681                 intel_atomic_get_old_crtc_state(state, crtc);
3682         const struct drm_connector_state *old_conn_state;
3683         struct drm_connector *conn;
3684         int i;
3685
3686         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3687                 struct intel_encoder *encoder =
3688                         to_intel_encoder(old_conn_state->best_encoder);
3689
3690                 if (old_conn_state->crtc != &crtc->base)
3691                         continue;
3692
3693                 if (encoder->post_disable)
3694                         encoder->post_disable(state, encoder,
3695                                               old_crtc_state, old_conn_state);
3696         }
3697 }
3698
3699 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3700                                             struct intel_crtc *crtc)
3701 {
3702         const struct intel_crtc_state *old_crtc_state =
3703                 intel_atomic_get_old_crtc_state(state, crtc);
3704         const struct drm_connector_state *old_conn_state;
3705         struct drm_connector *conn;
3706         int i;
3707
3708         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3709                 struct intel_encoder *encoder =
3710                         to_intel_encoder(old_conn_state->best_encoder);
3711
3712                 if (old_conn_state->crtc != &crtc->base)
3713                         continue;
3714
3715                 if (encoder->post_pll_disable)
3716                         encoder->post_pll_disable(state, encoder,
3717                                                   old_crtc_state, old_conn_state);
3718         }
3719 }
3720
3721 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3722                                        struct intel_crtc *crtc)
3723 {
3724         const struct intel_crtc_state *crtc_state =
3725                 intel_atomic_get_new_crtc_state(state, crtc);
3726         const struct drm_connector_state *conn_state;
3727         struct drm_connector *conn;
3728         int i;
3729
3730         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3731                 struct intel_encoder *encoder =
3732                         to_intel_encoder(conn_state->best_encoder);
3733
3734                 if (conn_state->crtc != &crtc->base)
3735                         continue;
3736
3737                 if (encoder->update_pipe)
3738                         encoder->update_pipe(state, encoder,
3739                                              crtc_state, conn_state);
3740         }
3741 }
3742
3743 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3744 {
3745         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3746         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3747
3748         plane->disable_plane(plane, crtc_state);
3749 }
3750
/*
 * Enable sequence for ILK-style pipes with an optional PCH encoder:
 * program timings/M-N and pipeconf, enable the FDI PLL and the panel
 * fitter, load the LUTs while clocks run but before the pipe does,
 * then enable the pipe, PCH transcoder and encoders. CPU and PCH FIFO
 * underrun reporting is suppressed for the whole sequence (see below).
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		/* Without a PCH encoder FDI must be fully off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
3842
3843 /* IPS only exists on ULT machines and is tied to pipe A. */
3844 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3845 {
3846         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3847 }
3848
3849 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3850                                             enum pipe pipe, bool apply)
3851 {
3852         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3853         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3854
3855         if (apply)
3856                 val |= mask;
3857         else
3858                 val &= ~mask;
3859
3860         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3861 }
3862
3863 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3864 {
3865         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3866         enum pipe pipe = crtc->pipe;
3867         u32 val;
3868
3869         val = MBUS_DBOX_A_CREDIT(2);
3870
3871         if (INTEL_GEN(dev_priv) >= 12) {
3872                 val |= MBUS_DBOX_BW_CREDIT(2);
3873                 val |= MBUS_DBOX_B_CREDIT(12);
3874         } else {
3875                 val |= MBUS_DBOX_BW_CREDIT(1);
3876                 val |= MBUS_DBOX_B_CREDIT(8);
3877         }
3878
3879         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3880 }
3881
3882 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3883 {
3884         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3885         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3886
3887         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3888                        HSW_LINETIME(crtc_state->linetime) |
3889                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
3890 }
3891
3892 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3893 {
3894         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3895         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3896         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3897         u32 val;
3898
3899         val = intel_de_read(dev_priv, reg);
3900         val &= ~HSW_FRAME_START_DELAY_MASK;
3901         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3902         intel_de_write(dev_priv, reg, val);
3903 }
3904
/*
 * Bigjoiner pre-enable handling: the master pipe only enables its own
 * VDSC (skipped during the regular pre-enable), while the slave pipe
 * runs enable sequence steps 1-7 on its master (pre-pll, shared DPLL,
 * pre-enable) and then enables DSC on itself.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* For a slave pipe the relevant master is the linked crtc. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Find the encoder feeding the master pipe. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		/*
		 * NOTE(review): encoder remains NULL if no connector in the
		 * state targets the master crtc -- presumably one always
		 * does on this path; confirm.
		 */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}
}
3943
/*
 * Enable sequence for HSW+ (DDI) pipes: PLL and encoder pre-enable
 * (or the bigjoiner variant), transcoder timings and pipeconf, panel
 * fitter, LUTs, linetime watermarks, MBUS credits, encoder enable,
 * plus the GLK/CNL scaler clock gating and HSW pipe workarounds.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		/* Bigjoiner pipes have their own pre-enable ordering. */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder timings are owned by the master pipe, and DSI has none. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* Undo the WA once the scaler has actually been used for a frame. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
4041
4042 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4043 {
4044         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4045         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4046         enum pipe pipe = crtc->pipe;
4047
4048         /* To avoid upsetting the power well on haswell only disable the pfit if
4049          * it's in use. The hw state code will make sure we get this right. */
4050         if (!old_crtc_state->pch_pfit.enabled)
4051                 return;
4052
4053         intel_de_write(dev_priv, PF_CTL(pipe), 0);
4054         intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
4055         intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
4056 }
4057
/*
 * Disable sequence for ILK-style pipes: encoders, pipe, panel fitter,
 * then FDI and the PCH transcoder when a PCH encoder was in use. FIFO
 * underrun reporting is suppressed while FDI may still be active.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		/* CPT has extra per-transcoder routing state to tear down. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4114
/*
 * Disable sequence for HSW+ pipes: only the encoder disable and
 * post-disable hooks are invoked from here.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
4125
4126 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
4127 {
4128         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4129         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4130
4131         if (!crtc_state->gmch_pfit.control)
4132                 return;
4133
4134         /*
4135          * The panel fitter should only be adjusted whilst the pipe is disabled,
4136          * according to register description and PRM.
4137          */
4138         drm_WARN_ON(&dev_priv->drm,
4139                     intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
4140         assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
4141
4142         intel_de_write(dev_priv, PFIT_PGM_RATIOS,
4143                        crtc_state->gmch_pfit.pgm_ratios);
4144         intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
4145
4146         /* Border color in case we don't scale up to the full screen. Black by
4147          * default, change to something else for debugging. */
4148         intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
4149 }
4150
4151 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
4152 {
4153         if (phy == PHY_NONE)
4154                 return false;
4155         else if (IS_ALDERLAKE_S(dev_priv))
4156                 return phy <= PHY_E;
4157         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
4158                 return phy <= PHY_D;
4159         else if (IS_JSL_EHL(dev_priv))
4160                 return phy <= PHY_C;
4161         else if (INTEL_GEN(dev_priv) >= 11)
4162                 return phy <= PHY_B;
4163         else
4164                 return false;
4165 }
4166
4167 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
4168 {
4169         if (IS_TIGERLAKE(dev_priv))
4170                 return phy >= PHY_D && phy <= PHY_I;
4171         else if (IS_ICELAKE(dev_priv))
4172                 return phy >= PHY_C && phy <= PHY_F;
4173         else
4174                 return false;
4175 }
4176
/*
 * Map a port to its PHY: ADL-S Type-C ports start at PHY_B, DG1/RKL
 * Type-C ports at PHY_C, JSL/EHL routes port D to PHY_A; everything
 * else maps 1:1 starting from PHY_A. The order of the checks matters,
 * as some of the port names alias on different platforms.
 */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if (IS_JSL_EHL(i915) && port == PORT_D)
		return PHY_A;

	return PHY_A + port - PORT_A;
}
4188
4189 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
4190 {
4191         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
4192                 return TC_PORT_NONE;
4193
4194         if (INTEL_GEN(dev_priv) >= 12)
4195                 return TC_PORT_1 + port - PORT_TC1;
4196         else
4197                 return TC_PORT_1 + port - PORT_C;
4198 }
4199
/*
 * Map a DDI port to its lane power domain. Unknown ports trigger a
 * MISSING_CASE warning and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
4226
/*
 * Map a digital port to the power domain of its AUX channel. Type-C
 * ports operating in TBT-alt mode use the *_TBT domain variants;
 * everything else is resolved via intel_legacy_aux_to_power_domain().
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
4258
4259 /*
4260  * Converts aux_ch to power_domain without caring about TBT ports for that use
4261  * intel_aux_power_domain()
4262  */
4263 enum intel_display_power_domain
4264 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
4265 {
4266         switch (aux_ch) {
4267         case AUX_CH_A:
4268                 return POWER_DOMAIN_AUX_A;
4269         case AUX_CH_B:
4270                 return POWER_DOMAIN_AUX_B;
4271         case AUX_CH_C:
4272                 return POWER_DOMAIN_AUX_C;
4273         case AUX_CH_D:
4274                 return POWER_DOMAIN_AUX_D;
4275         case AUX_CH_E:
4276                 return POWER_DOMAIN_AUX_E;
4277         case AUX_CH_F:
4278                 return POWER_DOMAIN_AUX_F;
4279         case AUX_CH_G:
4280                 return POWER_DOMAIN_AUX_G;
4281         case AUX_CH_H:
4282                 return POWER_DOMAIN_AUX_H;
4283         case AUX_CH_I:
4284                 return POWER_DOMAIN_AUX_I;
4285         default:
4286                 MISSING_CASE(aux_ch);
4287                 return POWER_DOMAIN_AUX_A;
4288         }
4289 }
4290
/*
 * Compute the bitmask of power domains needed by @crtc_state: the pipe
 * and transcoder, the panel fitter (when enabled or force-enabled),
 * every attached encoder's domain, audio (DDI platforms), the shared
 * DPLL and DSC where applicable. Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Accumulate the domain of every encoder driven by this crtc. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));

	return mask;
}
4327
4328 static u64
4329 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
4330 {
4331         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4332         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4333         enum intel_display_power_domain domain;
4334         u64 domains, new_domains, old_domains;
4335
4336         domains = get_crtc_power_domains(crtc_state);
4337
4338         new_domains = domains & ~crtc->enabled_power_domains.mask;
4339         old_domains = crtc->enabled_power_domains.mask & ~domains;
4340
4341         for_each_power_domain(domain, new_domains)
4342                 intel_display_power_get_in_set(dev_priv,
4343                                                &crtc->enabled_power_domains,
4344                                                domain);
4345
4346         return old_domains;
4347 }
4348
4349 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
4350                                            u64 domains)
4351 {
4352         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
4353                                             &crtc->enabled_power_domains,
4354                                             domains);
4355 }
4356
/*
 * Enable sequence for VLV/CHV pipes: program timings and pipeconf,
 * enable the (CHV or VLV) PLL, panel fitter and LUTs, then the pipe
 * and encoders.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/*
	 * NOTE(review): unlike i9xx_crtc_enable() this call is not
	 * NULL-checked -- presumably initial_watermarks is always set
	 * on VLV/CHV; confirm.
	 */
	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
4411
4412 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
4413 {
4414         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4415         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4416
4417         intel_de_write(dev_priv, FP0(crtc->pipe),
4418                        crtc_state->dpll_hw_state.fp0);
4419         intel_de_write(dev_priv, FP1(crtc->pipe),
4420                        crtc_state->dpll_hw_state.fp1);
4421 }
4422
/*
 * Enable sequence for pre-ILK gmch pipes: PLL dividers, timings,
 * pipeconf, PLL, panel fitter, LUTs, watermarks, pipe and encoders.
 * Gen2 has no underrun reporting and needs a trailing vblank wait.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}
4474
4475 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4476 {
4477         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4478         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4479
4480         if (!old_crtc_state->gmch_pfit.control)
4481                 return;
4482
4483         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
4484
4485         drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
4486                     intel_de_read(dev_priv, PFIT_CONTROL));
4487         intel_de_write(dev_priv, PFIT_CONTROL, 0);
4488 }
4489
/*
 * Disable sequence for pre-ILK gmch pipes: encoders, pipe, panel
 * fitter and finally the DPLL (which is left running for DSI outputs).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* The DPLL keeps running for DSI outputs. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
4536
/*
 * Disable a CRTC outside the normal atomic commit path (used when
 * sanitizing inconsistent hardware state at driver load/resume).
 *
 * A throwaway atomic state is built purely to drive the platform
 * crtc_disable() hook; afterwards all the software bookkeeping that a
 * real commit would have updated (crtc/encoder state, cdclk, dbuf and
 * bandwidth tracking) is scrubbed by hand.  All required locks must
 * already be held via @ctx.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder;
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_bw_state *bw_state =
                to_intel_bw_state(dev_priv->bw_obj.state);
        struct intel_cdclk_state *cdclk_state =
                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
        struct intel_dbuf_state *dbuf_state =
                to_intel_dbuf_state(dev_priv->dbuf.obj.state);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane *plane;
        struct drm_atomic_state *state;
        struct intel_crtc_state *temp_crtc_state;
        enum pipe pipe = crtc->pipe;
        int ret;

        if (!crtc_state->hw.active)
                return;

        /* Planes must be shut down before the pipe itself. */
        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);

                if (plane_state->uapi.visible)
                        intel_plane_disable_noatomic(crtc, plane);
        }

        state = drm_atomic_state_alloc(&dev_priv->drm);
        if (!state) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to disable [CRTC:%d:%s], out of memory",
                            crtc->base.base.id, crtc->base.name);
                return;
        }

        state->acquire_ctx = ctx;

        /* Everything's already locked, -EDEADLK can't happen. */
        temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
        ret = drm_atomic_add_affected_connectors(state, &crtc->base);

        drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

        dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

        drm_atomic_state_put(state);

        drm_dbg_kms(&dev_priv->drm,
                    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                    crtc->base.base.id, crtc->base.name);

        crtc->active = false;
        crtc->base.enabled = false;

        /* Clear the uapi and hw crtc state to reflect the disabled pipe. */
        drm_WARN_ON(&dev_priv->drm,
                    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
        crtc_state->uapi.active = false;
        crtc_state->uapi.connector_mask = 0;
        crtc_state->uapi.encoder_mask = 0;
        intel_crtc_free_hw_state(crtc_state);
        memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

        /* Detach every encoder that was routed to this crtc. */
        for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
                encoder->base.crtc = NULL;

        intel_fbc_disable(crtc);
        intel_update_watermarks(crtc);
        intel_disable_shared_dpll(crtc_state);

        intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

        /* Update global cdclk/dbuf/bandwidth bookkeeping for the dead pipe. */
        dev_priv->active_pipes &= ~BIT(pipe);
        cdclk_state->min_cdclk[pipe] = 0;
        cdclk_state->min_voltage_level[pipe] = 0;
        cdclk_state->active_pipes &= ~BIT(pipe);

        dbuf_state->active_pipes &= ~BIT(pipe);

        bw_state->data_rate[pipe] = 0;
        bw_state->num_active_planes[pipe] = 0;
}
4621
4622 /*
4623  * turn all crtc's off, but do not adjust state
4624  * This has to be paired with a call to intel_modeset_setup_hw_state.
4625  */
4626 int intel_display_suspend(struct drm_device *dev)
4627 {
4628         struct drm_i915_private *dev_priv = to_i915(dev);
4629         struct drm_atomic_state *state;
4630         int ret;
4631
4632         state = drm_atomic_helper_suspend(dev);
4633         ret = PTR_ERR_OR_ZERO(state);
4634         if (ret)
4635                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4636                         ret);
4637         else
4638                 dev_priv->modeset_restore_state = state;
4639         return ret;
4640 }
4641
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        /* Tear down the base encoder, then free the containing object. */
        drm_encoder_cleanup(encoder);
        kfree(to_intel_encoder(encoder));
}
4649
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency).  Each mismatch fires an I915_STATE_WARN; the
 * function itself never alters state.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state)
{
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct drm_i915_private *i915 = to_i915(connector->base.dev);

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.base.id, connector->base.name);

        if (connector->get_hw_state(connector)) {
                struct intel_encoder *encoder = intel_attached_encoder(connector);

                I915_STATE_WARN(!crtc_state,
                         "connector enabled without attached crtc\n");

                if (!crtc_state)
                        return;

                I915_STATE_WARN(!crtc_state->hw.active,
                                "connector is active, but attached crtc isn't\n");

                /* MST encoders are bound to a crtc, not to a connector,
                 * hence the encoder/crtc cross checks don't apply. */
                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                        return;

                I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
                        "atomic encoder doesn't match attached encoder\n");

                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
                I915_STATE_WARN(crtc_state && crtc_state->hw.active,
                                "attached crtc is active, but connector isn't\n");
                I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
                        "best encoder set without crtc!\n");
        }
}
4688
4689 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4690 {
4691         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4692         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4693
4694         /* IPS only exists on ULT machines and is tied to pipe A. */
4695         if (!hsw_crtc_supports_ips(crtc))
4696                 return false;
4697
4698         if (!dev_priv->params.enable_ips)
4699                 return false;
4700
4701         if (crtc_state->pipe_bpp > 24)
4702                 return false;
4703
4704         /*
4705          * We compare against max which means we must take
4706          * the increased cdclk requirement into account when
4707          * calculating the new cdclk.
4708          *
4709          * Should measure whether using a lower cdclk w/o IPS
4710          */
4711         if (IS_BROADWELL(dev_priv) &&
4712             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
4713                 return false;
4714
4715         return true;
4716 }
4717
/*
 * Decide whether IPS (Intermediate Pixel Storage) should be enabled for
 * this crtc state during atomic check.  Leaves ips_enabled false unless
 * every constraint is met.  Returns 0 or a negative error from cdclk
 * state acquisition.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->uapi.crtc->dev);
        struct intel_atomic_state *state =
                to_intel_atomic_state(crtc_state->uapi.state);

        /* Default to off; set true only if all checks below pass. */
        crtc_state->ips_enabled = false;

        if (!hsw_crtc_state_ips_capable(crtc_state))
                return 0;

        /*
         * When IPS gets enabled, the pipe CRC changes. Since IPS gets
         * enabled and disabled dynamically based on package C states,
         * user space can't make reliable use of the CRCs, so let's just
         * completely disable it.
         */
        if (crtc_state->crc_enabled)
                return 0;

        /* IPS should be fine as long as at least one plane is enabled. */
        if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
                return 0;

        if (IS_BROADWELL(dev_priv)) {
                const struct intel_cdclk_state *cdclk_state;

                cdclk_state = intel_atomic_get_cdclk_state(state);
                if (IS_ERR(cdclk_state))
                        return PTR_ERR(cdclk_state);

                /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
                if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
                        return 0;
        }

        crtc_state->ips_enabled = true;

        return 0;
}
4759
4760 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4761 {
4762         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4763
4764         /* GDG double wide on either pipe, otherwise pipe A only */
4765         return INTEL_GEN(dev_priv) < 4 &&
4766                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
4767 }
4768
4769 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4770 {
4771         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4772         unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
4773
4774         /*
4775          * We only use IF-ID interlacing. If we ever use
4776          * PF-ID we'll need to adjust the pixel_rate here.
4777          */
4778
4779         if (!crtc_state->pch_pfit.enabled)
4780                 return pixel_rate;
4781
4782         pipe_w = crtc_state->pipe_src_w;
4783         pipe_h = crtc_state->pipe_src_h;
4784
4785         pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
4786         pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
4787
4788         if (pipe_w < pfit_w)
4789                 pipe_w = pfit_w;
4790         if (pipe_h < pfit_h)
4791                 pipe_h = pfit_h;
4792
4793         if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
4794                         !pfit_w || !pfit_h))
4795                 return pixel_rate;
4796
4797         return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
4798                        pfit_w * pfit_h);
4799 }
4800
4801 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4802                                          const struct drm_display_mode *timings)
4803 {
4804         mode->hdisplay = timings->crtc_hdisplay;
4805         mode->htotal = timings->crtc_htotal;
4806         mode->hsync_start = timings->crtc_hsync_start;
4807         mode->hsync_end = timings->crtc_hsync_end;
4808
4809         mode->vdisplay = timings->crtc_vdisplay;
4810         mode->vtotal = timings->crtc_vtotal;
4811         mode->vsync_start = timings->crtc_vsync_start;
4812         mode->vsync_end = timings->crtc_vsync_end;
4813
4814         mode->flags = timings->flags;
4815         mode->type = DRM_MODE_TYPE_DRIVER;
4816
4817         mode->clock = timings->crtc_clock;
4818
4819         drm_mode_set_name(mode);
4820 }
4821
4822 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4823 {
4824         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4825
4826         if (HAS_GMCH(dev_priv))
4827                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
4828                 crtc_state->pixel_rate =
4829                         crtc_state->hw.pipe_mode.crtc_clock;
4830         else
4831                 crtc_state->pixel_rate =
4832                         ilk_pipe_pixel_rate(crtc_state);
4833 }
4834
/*
 * Derive the software-only parts of the crtc state (pipe_mode, pixel
 * rate, user mode) from the adjusted_mode that was read back from the
 * hardware.  Bigjoiner and eDP MSO splitter adjustments are applied so
 * pipe_mode reflects per-pipe timings while adjusted_mode reflects the
 * transcoder.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
        struct drm_display_mode *mode = &crtc_state->hw.mode;
        struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
        struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

        drm_mode_copy(pipe_mode, adjusted_mode);

        if (crtc_state->bigjoiner) {
                /*
                 * transcoder is programmed to the full mode,
                 * but pipe timings are half of the transcoder mode
                 */
                pipe_mode->crtc_hdisplay /= 2;
                pipe_mode->crtc_hblank_start /= 2;
                pipe_mode->crtc_hblank_end /= 2;
                pipe_mode->crtc_hsync_start /= 2;
                pipe_mode->crtc_hsync_end /= 2;
                pipe_mode->crtc_htotal /= 2;
                pipe_mode->crtc_clock /= 2;
        }

        if (crtc_state->splitter.enable) {
                int n = crtc_state->splitter.link_count;
                int overlap = crtc_state->splitter.pixel_overlap;

                /*
                 * eDP MSO uses segment timings from EDID for transcoder
                 * timings, but full mode for everything else.
                 *
                 * h_full = (h_segment - pixel_overlap) * link_count
                 */
                pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
                pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
                pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
                pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
                pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
                pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
                pipe_mode->crtc_clock *= n;

                /* With MSO the adjusted mode also carries the full timings. */
                intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
                intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
        } else {
                intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
                intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
        }

        intel_crtc_compute_pixel_rate(crtc_state);

        /* The user mode reflects the (possibly joined) full source size. */
        drm_mode_copy(mode, adjusted_mode);
        mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
        mode->vdisplay = crtc_state->pipe_src_h;
}
4888
/*
 * Read out @crtc_state from the hardware via the encoder's ->get_config()
 * hook, then compute the derived software state from what was read.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *crtc_state)
{
        encoder->get_config(encoder, crtc_state);

        intel_crtc_readout_derived_state(crtc_state);
}
4896
/*
 * Validate and finalize the crtc configuration during atomic check:
 * derives pipe_mode from adjusted_mode (bigjoiner/MSO aware), enforces
 * the platform dot clock limit (enabling double wide mode where that
 * helps), rejects modes the hardware cannot generate, and computes the
 * pixel rate.  Returns 0 on success or -EINVAL for an unsupportable
 * configuration.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
        int clock_limit = dev_priv->max_dotclk_freq;

        drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

        /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
        if (pipe_config->bigjoiner) {
                pipe_mode->crtc_clock /= 2;
                pipe_mode->crtc_hdisplay /= 2;
                pipe_mode->crtc_hblank_start /= 2;
                pipe_mode->crtc_hblank_end /= 2;
                pipe_mode->crtc_hsync_start /= 2;
                pipe_mode->crtc_hsync_end /= 2;
                pipe_mode->crtc_htotal /= 2;
                pipe_config->pipe_src_w /= 2;
        }

        /* eDP MSO: expand segment timings to the full mode for the pipe. */
        if (pipe_config->splitter.enable) {
                int n = pipe_config->splitter.link_count;
                int overlap = pipe_config->splitter.pixel_overlap;

                pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
                pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
                pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
                pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
                pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
                pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
                pipe_mode->crtc_clock *= n;
        }

        intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

        if (INTEL_GEN(dev_priv) < 4) {
                clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

                /*
                 * Enable double wide mode when the dot clock
                 * is > 90% of the (display) core speed.
                 */
                if (intel_crtc_supports_double_wide(crtc) &&
                    pipe_mode->crtc_clock > clock_limit) {
                        clock_limit = dev_priv->max_dotclk_freq;
                        pipe_config->double_wide = true;
                }
        }

        if (pipe_mode->crtc_clock > clock_limit) {
                drm_dbg_kms(&dev_priv->drm,
                            "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
                            pipe_mode->crtc_clock, clock_limit,
                            yesno(pipe_config->double_wide));
                return -EINVAL;
        }

        /*
         * Pipe horizontal size must be even in:
         * - DVO ganged mode
         * - LVDS dual channel mode
         * - Double wide pipe
         */
        if (pipe_config->pipe_src_w & 1) {
                if (pipe_config->double_wide) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Odd pipe source width not supported with double wide pipe\n");
                        return -EINVAL;
                }

                if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
                    intel_is_dual_link_lvds(dev_priv)) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Odd pipe source width not supported with dual link LVDS\n");
                        return -EINVAL;
                }
        }

        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
        if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
            pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
                return -EINVAL;

        intel_crtc_compute_pixel_rate(pipe_config);

        /* PCH-attached outputs also need a validated FDI link config. */
        if (pipe_config->has_pch_encoder)
                return ilk_fdi_compute_config(crtc, pipe_config);

        return 0;
}
4990
4991 static void
4992 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4993 {
4994         while (*num > DATA_LINK_M_N_MASK ||
4995                *den > DATA_LINK_M_N_MASK) {
4996                 *num >>= 1;
4997                 *den >>= 1;
4998         }
4999 }
5000
5001 static void compute_m_n(unsigned int m, unsigned int n,
5002                         u32 *ret_m, u32 *ret_n,
5003                         bool constant_n)
5004 {
5005         /*
5006          * Several DP dongles in particular seem to be fussy about
5007          * too large link M/N values. Give N value as 0x8000 that
5008          * should be acceptable by specific devices. 0x8000 is the
5009          * specified fixed N value for asynchronous clock mode,
5010          * which the devices expect also in synchronous clock mode.
5011          */
5012         if (constant_n)
5013                 *ret_n = DP_LINK_CONSTANT_N_VALUE;
5014         else
5015                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5016
5017         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
5018         intel_reduce_m_n_ratio(ret_m, ret_n);
5019 }
5020
5021 void
5022 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
5023                        int pixel_clock, int link_clock,
5024                        struct intel_link_m_n *m_n,
5025                        bool constant_n, bool fec_enable)
5026 {
5027         u32 data_clock = bits_per_pixel * pixel_clock;
5028
5029         if (fec_enable)
5030                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
5031
5032         m_n->tu = 64;
5033         compute_m_n(data_clock,
5034                     link_clock * nlanes * 8,
5035                     &m_n->gmch_m, &m_n->gmch_n,
5036                     constant_n);
5037
5038         compute_m_n(pixel_clock, link_clock,
5039                     &m_n->link_m, &m_n->link_n,
5040                     constant_n);
5041 }
5042
5043 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
5044 {
5045         /*
5046          * There may be no VBT; and if the BIOS enabled SSC we can
5047          * just keep using it to avoid unnecessary flicker.  Whereas if the
5048          * BIOS isn't using it, don't assume it will work even if the VBT
5049          * indicates as much.
5050          */
5051         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5052                 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
5053                                                        PCH_DREF_CONTROL) &
5054                         DREF_SSC1_ENABLE;
5055
5056                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
5057                         drm_dbg_kms(&dev_priv->drm,
5058                                     "SSC %s by BIOS, overriding VBT which says %s\n",
5059                                     enableddisabled(bios_lvds_use_ssc),
5060                                     enableddisabled(dev_priv->vbt.lvds_use_ssc));
5061                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
5062                 }
5063         }
5064 }
5065
/* Program the PCH transcoder data/link M1/N1 registers for this pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* TU size shares the DATA_M1 register with the M value. */
        intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
                       TU_SIZE(m_n->tu) | m_n->gmch_m);
        intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
        intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
5079
5080 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
5081                                  enum transcoder transcoder)
5082 {
5083         if (IS_HASWELL(dev_priv))
5084                 return transcoder == TRANSCODER_EDP;
5085
5086         /*
5087          * Strictly speaking some registers are available before
5088          * gen7, but we only support DRRS on gen7+
5089          */
5090         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
5091 }
5092
/*
 * Program the CPU transcoder data/link M/N registers.  On gen5+ the
 * registers are indexed by transcoder; earlier (G4X-style) hardware
 * indexes them by pipe.  @m2_n2 is optional and only written when DRRS
 * is in use and the transcoder actually has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 5) {
                intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
                               TU_SIZE(m_n->tu) | m_n->gmch_m);
                intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
                               m_n->gmch_n);
                intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
                               m_n->link_m);
                intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
                               m_n->link_n);
                /*
                 * M2_N2 registers are set only if DRRS is supported
                 * (to make sure the registers are not unnecessarily accessed).
                 */
                if (m2_n2 && crtc_state->has_drrs &&
                    transcoder_has_m2_n2(dev_priv, transcoder)) {
                        intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
                                       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
                                       m2_n2->gmch_n);
                        intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
                                       m2_n2->link_m);
                        intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
                                       m2_n2->link_n);
                }
        } else {
                intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
                               TU_SIZE(m_n->tu) | m_n->gmch_m);
                intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
                intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
                intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
        }
}
5134
5135 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
5136 {
5137         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
5138         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
5139
5140         if (m_n == M1_N1) {
5141                 dp_m_n = &crtc_state->dp_m_n;
5142                 dp_m2_n2 = &crtc_state->dp_m2_n2;
5143         } else if (m_n == M2_N2) {
5144
5145                 /*
5146                  * M2_N2 registers are not supported. Hence m2_n2 divider value
5147                  * needs to be programmed into M1_N1.
5148                  */
5149                 dp_m_n = &crtc_state->dp_m2_n2;
5150         } else {
5151                 drm_err(&i915->drm, "Unsupported divider value\n");
5152                 return;
5153         }
5154
5155         if (crtc_state->has_pch_encoder)
5156                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
5157         else
5158                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
5159 }
5160
/*
 * Program the CPU transcoder H/V timing registers from adjusted_mode.
 * Interlaced modes need vtotal/vblank_end adjusted (the hardware adds
 * the extra halflines itself) and a VSYNCSHIFT value.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
        u32 crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;

        /* We need to be careful not to changed the adjusted mode, for otherwise
         * the hw state checker will get angry at the mismatch. */
        crtc_vtotal = adjusted_mode->crtc_vtotal;
        crtc_vblank_end = adjusted_mode->crtc_vblank_end;

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                /* the chip adds 2 halflines automatically */
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;

                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
                                adjusted_mode->crtc_htotal / 2;
                if (vsyncshift < 0)
                        vsyncshift += adjusted_mode->crtc_htotal;
        }

        /* VSYNCSHIFT only exists on gen4+. */
        if (INTEL_GEN(dev_priv) > 3)
                intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
                               vsyncshift);

        /* Registers pack (start - 1) in the low and (end - 1) in the high half. */
        intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
                       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
        intel_de_write(dev_priv, HBLANK(cpu_transcoder),
                       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
        intel_de_write(dev_priv, HSYNC(cpu_transcoder),
                       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

        intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
                       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
        intel_de_write(dev_priv, VBLANK(cpu_transcoder),
                       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
        intel_de_write(dev_priv, VSYNC(cpu_transcoder),
                       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
         * documented on the DDI_FUNC_CTL register description, EDP Input Select
         * bits. */
        if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
            (pipe == PIPE_B || pipe == PIPE_C))
                intel_de_write(dev_priv, VTOTAL(pipe),
                               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
5218
/* Program the pipe source size (scaler input size) register. */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* pipesrc controls the size that is scaled from, which should
         * always be the user's requested size.  The register stores
         * (width - 1) in the high half and (height - 1) in the low half.
         */
        intel_de_write(dev_priv, PIPESRC(pipe),
                       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}
5231
5232 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
5233 {
5234         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5235         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5236
5237         if (IS_GEN(dev_priv, 2))
5238                 return false;
5239
5240         if (INTEL_GEN(dev_priv) >= 9 ||
5241             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
5242                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
5243         else
5244                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
5245 }
5246
/*
 * Read the transcoder H/V timing registers back into adjusted_mode.
 * Register fields store value-1, hence the +1 on every read.  DSI
 * transcoders lack the blank registers, so those are skipped.  For
 * interlaced pipes the automatic halfline additions are re-applied.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        u32 tmp;

        tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
                pipe_config->hw.adjusted_mode.crtc_hblank_start =
                                                        (tmp & 0xffff) + 1;
                pipe_config->hw.adjusted_mode.crtc_hblank_end =
                                                ((tmp >> 16) & 0xffff) + 1;
        }
        tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

        tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
                pipe_config->hw.adjusted_mode.crtc_vblank_start =
                                                        (tmp & 0xffff) + 1;
                pipe_config->hw.adjusted_mode.crtc_vblank_end =
                                                ((tmp >> 16) & 0xffff) + 1;
        }
        tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

        if (intel_pipe_is_interlaced(pipe_config)) {
                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
                /* Undo the halflines the hardware adds automatically. */
                pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
                pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
        }
}
5291
5292 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
5293                                     struct intel_crtc_state *pipe_config)
5294 {
5295         struct drm_device *dev = crtc->base.dev;
5296         struct drm_i915_private *dev_priv = to_i915(dev);
5297         u32 tmp;
5298
5299         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
5300         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5301         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5302 }
5303
/*
 * Program PIPECONF for GMCH-style (pre-ILK, VLV, CHV) pipes from the
 * committed crtc state: double wide, bpc/dither, interlace mode,
 * limited color range, gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* pre-gen4 and SDVO outputs use the field-indication flavour */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* only VLV/CHV select limited color range via PIPECONF here */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* hardware counts from 0, software from 1 */
	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	/* posting read to flush the write */
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
5364
5365 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
5366 {
5367         if (IS_I830(dev_priv))
5368                 return false;
5369
5370         return INTEL_GEN(dev_priv) >= 4 ||
5371                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
5372 }
5373
/*
 * Read out the GMCH panel fitter state for this pipe into @crtc_state.
 * Leaves gmch_pfit untouched if the platform has no pfit, the pfit is
 * disabled, or it is attached to a different pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* pre-gen4: only pipe B can use the pfit */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* gen4+: PFIT_CONTROL carries the pipe selection */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
5400
/*
 * Compute the port clock of a VLV pipe by reading the DPLL divider
 * values back through the DPIO sideband.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* sideband access must be bracketed by get/put */
	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* unpack all m/n/p dividers from the single DW3 register */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
5427
/*
 * Compute the port clock of a CHV pipe by reading the DPLL divider
 * values back through the DPIO sideband (CHV spreads them across
 * several PLL/CMN registers, unlike VLV's single DW3).
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 has an integer part (DW0) and an optional fractional part (DW2) */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
5461
5462 static enum intel_output_format
5463 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
5464 {
5465         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5466         u32 tmp;
5467
5468         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5469
5470         if (tmp & PIPEMISC_YUV420_ENABLE) {
5471                 /* We support 4:2:0 in full blend mode only */
5472                 drm_WARN_ON(&dev_priv->drm,
5473                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
5474
5475                 return INTEL_OUTPUT_FORMAT_YCBCR420;
5476         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
5477                 return INTEL_OUTPUT_FORMAT_YCBCR444;
5478         } else {
5479                 return INTEL_OUTPUT_FORMAT_RGB;
5480         }
5481 }
5482
5483 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
5484 {
5485         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5486         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5487         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5488         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
5489         u32 tmp;
5490
5491         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
5492
5493         if (tmp & DISPPLANE_GAMMA_ENABLE)
5494                 crtc_state->gamma_enable = true;
5495
5496         if (!HAS_GMCH(dev_priv) &&
5497             tmp & DISPPLANE_PIPE_CSC_ENABLE)
5498                 crtc_state->csc_enable = true;
5499 }
5500
/*
 * Read out the full hardware state of a GMCH-style (gen2-gen4, VLV,
 * CHV) pipe into @pipe_config.
 *
 * Grabs the pipe power domain for the duration of the readout and
 * releases it before returning. Returns false if the power domain is
 * not already enabled or the pipe is disabled, true with @pipe_config
 * populated otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* on these platforms the transcoder maps 1:1 onto the pipe */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* only g4x and later have the bpc field in PIPECONF */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		/* these pre-gen4 parts keep the multiplier in DPLL itself */
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
5622
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * ILK-class hardware according to which outputs are present (LVDS,
 * CPU eDP) and whether any shared DPLL already consumes the SSC
 * source.
 *
 * Each clock source must be enabled/disabled in turn with a posting
 * read and a 200us delay between steps, so the desired final value is
 * computed first and the hardware is only touched if it differs.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* IBX: external CK505 clock may replace SSC; elsewhere SSC is usable */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* no panel, but keep SSC alive for the PLL still using it */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* nothing to do if the hardware already matches the wanted state */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* the step-by-step reprogramming must end up at the precomputed value */
	BUG_ON(val != final);
}
5791
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset
 * control bit, wait (up to 100us) for the status bit to follow, then
 * de-assert and wait for the status to clear again.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
5812
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the SBI sideband.
 * The raw offsets and values are prescribed by the workaround above;
 * do not "clean them up" without checking the workaround text.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
5887
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* sanitize the combination: FDI needs spread, LP PCH has no FDI */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* all SBI accesses below must hold the sideband lock */
	mutex_lock(&dev_priv->sb_lock);

	/* un-disable SSC; PATHALT stays set and is only cleared below
	 * when spread is actually requested */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP and non-LP PCH use different buffer-enable registers */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5933
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* LP and non-LP PCH use different buffer-enable registers */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* set PATHALT first (with a settle delay), then disable SSC */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
5959
/* Map a clock bend in steps (-50..+50, in steps of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE register values for each supported bend amount,
 * indexed via BEND_IDX(). Consumed by lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
5985
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* only multiples of 5 within the table range are valid */
	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* odd multiples of 5 get a dither phase pattern, even ones none */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* replace the low 16 bits with the table value, keep the rest */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
6020
6021 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
6022 {
6023         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
6024         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
6025
6026         if ((ctl & SPLL_PLL_ENABLE) == 0)
6027                 return false;
6028
6029         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
6030             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
6031                 return true;
6032
6033         if (IS_BROADWELL(dev_priv) &&
6034             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
6035                 return true;
6036
6037         return false;
6038 }
6039
6040 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
6041                                enum intel_dpll_id id)
6042 {
6043         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
6044         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
6045
6046         if ((ctl & WRPLL_PLL_ENABLE) == 0)
6047                 return false;
6048
6049         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
6050                 return true;
6051
6052         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
6053             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
6054             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
6055                 return true;
6056
6057         return false;
6058 }
6059
/*
 * Configure the LPT CLKOUT_DP reference at init, based on whether FDI
 * (i.e. the analog CRT encoder) is present and whether any PLL is still
 * consuming the PCH SSC reference left enabled by the BIOS.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	/* Only the analog (CRT) encoder counts as an FDI user here. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	/* Don't touch the reference while any PLL above still uses it. */
	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		/* FDI needs CLKOUT_DP: undo any bend and enable it. */
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}
6117
6118 /*
6119  * Initialize reference clocks when the driver loads
6120  */
6121 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
6122 {
6123         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
6124                 ilk_init_pch_refclk(dev_priv);
6125         else if (HAS_PCH_LPT(dev_priv))
6126                 lpt_init_pch_refclk(dev_priv);
6127 }
6128
/*
 * Program PIPECONF for ILK-style (PCH platform) pipes: bpc, dithering,
 * interlace mode, color range/colorspace, gamma mode and frame start
 * delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* NOTE(review): limited range is deliberately not set for SDVO here. */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* The register field holds the delay minus one. */
	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
6185
6186 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
6187 {
6188         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6189         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6190         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
6191         u32 val = 0;
6192
6193         if (IS_HASWELL(dev_priv) && crtc_state->dither)
6194                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6195
6196         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6197                 val |= PIPECONF_INTERLACED_ILK;
6198         else
6199                 val |= PIPECONF_PROGRESSIVE;
6200
6201         if (IS_HASWELL(dev_priv) &&
6202             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
6203                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
6204
6205         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
6206         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
6207 }
6208
/*
 * Program PIPEMISC for BDW+ pipes: dither bpc/type, YCbCr output mode
 * and, on newer gens, HDR precision mode and pixel rounding behavior.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally needs the dedicated YUV420 pipe mode. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * ICL+: HDR precision mode is used when no planes other than
	 * HDR-capable ones (and possibly the cursor) are active.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
6254
6255 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
6256 {
6257         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6258         u32 tmp;
6259
6260         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
6261
6262         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
6263         case PIPEMISC_DITHER_6_BPC:
6264                 return 18;
6265         case PIPEMISC_DITHER_8_BPC:
6266                 return 24;
6267         case PIPEMISC_DITHER_10_BPC:
6268                 return 30;
6269         case PIPEMISC_DITHER_12_BPC:
6270                 return 36;
6271         default:
6272                 MISSING_CASE(tmp);
6273                 return 0;
6274         }
6275 }
6276
/*
 * Compute the number of FDI lanes required to carry @target_clock (kHz)
 * at @bpp bits per pixel over a link running at @link_bw (kHz), with a
 * 5% margin for spread spectrum.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int link_bps = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + link_bps - 1) / link_bps;
}
6287
/*
 * Read out the link M/N values from the PCH transcoder registers.
 * The TU size shares the DATA_M register, hence the masking below.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	/* The TU field holds the size minus one. */
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
6303
/*
 * Read out the link M/N values for a CPU transcoder. On gen5+ they
 * live in per-transcoder registers (with an optional second M2/N2
 * set, read only when @m2_n2 is non-NULL and the transcoder has one);
 * on older platforms they live in per-pipe G4X-style registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* The TU field (upper bits of DATA_M) holds the size minus one. */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
6348
6349 void intel_dp_get_m_n(struct intel_crtc *crtc,
6350                       struct intel_crtc_state *pipe_config)
6351 {
6352         if (pipe_config->has_pch_encoder)
6353                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6354         else
6355                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6356                                              &pipe_config->dp_m_n,
6357                                              &pipe_config->dp_m2_n2);
6358 }
6359
/* Read out the FDI link M/N values from the CPU transcoder. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	/* NULL: no M2/N2 read-out is done for FDI. */
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
6366
6367 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
6368                                   u32 pos, u32 size)
6369 {
6370         drm_rect_init(&crtc_state->pch_pfit.dst,
6371                       pos >> 16, pos & 0xffff,
6372                       size >> 16, size & 0xffff);
6373 }
6374
/*
 * Read out the panel fitter state on SKL+ by finding the scaler (if
 * any) that is enabled and bound to the pipe itself rather than to a
 * plane, then recording its window and scaler assignment.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* Skip scalers that are disabled or assigned to a plane. */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* Record which scaler (if any) the pipe is using. */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
6409
/* Read out the ILK-style panel fitter (PF) state for the pipe. */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
6435
6436 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
6437                                 struct intel_crtc_state *pipe_config)
6438 {
6439         struct drm_device *dev = crtc->base.dev;
6440         struct drm_i915_private *dev_priv = to_i915(dev);
6441         enum intel_display_power_domain power_domain;
6442         intel_wakeref_t wakeref;
6443         u32 tmp;
6444         bool ret;
6445
6446         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
6447         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
6448         if (!wakeref)
6449                 return false;
6450
6451         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6452         pipe_config->shared_dpll = NULL;
6453
6454         ret = false;
6455         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
6456         if (!(tmp & PIPECONF_ENABLE))
6457                 goto out;
6458
6459         switch (tmp & PIPECONF_BPC_MASK) {
6460         case PIPECONF_6BPC:
6461                 pipe_config->pipe_bpp = 18;
6462                 break;
6463         case PIPECONF_8BPC:
6464                 pipe_config->pipe_bpp = 24;
6465                 break;
6466         case PIPECONF_10BPC:
6467                 pipe_config->pipe_bpp = 30;
6468                 break;
6469         case PIPECONF_12BPC:
6470                 pipe_config->pipe_bpp = 36;
6471                 break;
6472         default:
6473                 break;
6474         }
6475
6476         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
6477                 pipe_config->limited_color_range = true;
6478
6479         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
6480         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
6481         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
6482                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6483                 break;
6484         default:
6485                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6486                 break;
6487         }
6488
6489         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
6490                 PIPECONF_GAMMA_MODE_SHIFT;
6491
6492         pipe_config->csc_mode = intel_de_read(dev_priv,
6493                                               PIPE_CSC_MODE(crtc->pipe));
6494
6495         i9xx_get_pipe_color_config(pipe_config);
6496         intel_color_get_config(pipe_config);
6497
6498         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6499                 struct intel_shared_dpll *pll;
6500                 enum intel_dpll_id pll_id;
6501                 bool pll_active;
6502
6503                 pipe_config->has_pch_encoder = true;
6504
6505                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
6506                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6507                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
6508
6509                 ilk_get_fdi_m_n_config(crtc, pipe_config);
6510
6511                 if (HAS_PCH_IBX(dev_priv)) {
6512                         /*
6513                          * The pipe->pch transcoder and pch transcoder->pll
6514                          * mapping is fixed.
6515                          */
6516                         pll_id = (enum intel_dpll_id) crtc->pipe;
6517                 } else {
6518                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
6519                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6520                                 pll_id = DPLL_ID_PCH_PLL_B;
6521                         else
6522                                 pll_id= DPLL_ID_PCH_PLL_A;
6523                 }
6524
6525                 pipe_config->shared_dpll =
6526                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
6527                 pll = pipe_config->shared_dpll;
6528
6529                 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
6530                                                      &pipe_config->dpll_hw_state);
6531                 drm_WARN_ON(dev, !pll_active);
6532
6533                 tmp = pipe_config->dpll_hw_state.dpll;
6534                 pipe_config->pixel_multiplier =
6535                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6536                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6537
6538                 ilk_pch_clock_get(crtc, pipe_config);
6539         } else {
6540                 pipe_config->pixel_multiplier = 1;
6541         }
6542
6543         intel_get_transcoder_timings(crtc, pipe_config);
6544         intel_get_pipe_src_size(crtc, pipe_config);
6545
6546         ilk_get_pfit_config(pipe_config);
6547
6548         ret = true;
6549
6550 out:
6551         intel_display_power_put(dev_priv, power_domain, wakeref);
6552
6553         return ret;
6554 }
6555
/*
 * Determine the CPU transcoder feeding this pipe — handling the eDP
 * and DSI panel transcoders, which break the otherwise fixed
 * pipe->transcoder mapping — and return whether that transcoder is
 * enabled. The transcoder's power domain reference is kept in
 * @power_domain_set when the read-out succeeds.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	/* ICL+ adds the DSI transcoders to the panel transcoder set. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe the panel transcoder is routed to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
6643
/*
 * Check whether this pipe is driven by one of the BXT DSI transcoders
 * (ports A/C). If so, pipe_config->cpu_transcoder is updated and the
 * transcoder's power domain reference is kept in @power_domain_set.
 * Returns true when a DSI transcoder was found for the pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
6689
/*
 * Determine which DDI port feeds the transcoder, and read out the
 * PCH/FDI state when the pipe is wired to DDI E on pre-gen9 hw.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders map directly to their ports. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		/* TGL+ encodes the port differently in the FUNC_CTL register. */
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
6728
/*
 * Read out the full hardware state of @crtc into @pipe_config on HSW+
 * platforms.
 *
 * Power domains for the pipe (and, separately, the panel fitter) are
 * grabbed only if they are already enabled; everything acquired is
 * released again before returning, so this takes no lasting references.
 *
 * Returns true if the pipe is active, false otherwise.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	/* If the pipe power well is off there is nothing to read out. */
	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* On GEN9 LP the pipe may instead be driven by a DSI transcoder. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		/* HSW encodes the output colorspace in PIPECONF. */
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		/* BDW+ report the output format via PIPEMISC instead. */
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* The panel fitter has its own power domain; read only if powered. */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		/* PIPE_MULT holds the multiplier minus one. */
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power domain reference acquired above. */
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
6852
6853 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6854 {
6855         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6856         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6857
6858         if (!i915->display.get_pipe_config(crtc, crtc_state))
6859                 return false;
6860
6861         crtc_state->hw.active = true;
6862
6863         intel_crtc_readout_derived_state(crtc_state);
6864
6865         return true;
6866 }
6867
/*
 * VESA 640x480@72Hz mode, forced onto an otherwise idle pipe during
 * load detection so analog connector probing has a live signal.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6873
6874 struct drm_framebuffer *
6875 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6876                          struct drm_mode_fb_cmd2 *mode_cmd)
6877 {
6878         struct intel_framebuffer *intel_fb;
6879         int ret;
6880
6881         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6882         if (!intel_fb)
6883                 return ERR_PTR(-ENOMEM);
6884
6885         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6886         if (ret)
6887                 goto err;
6888
6889         return &intel_fb->base;
6890
6891 err:
6892         kfree(intel_fb);
6893         return ERR_PTR(ret);
6894 }
6895
6896 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6897                                         struct drm_crtc *crtc)
6898 {
6899         struct drm_plane *plane;
6900         struct drm_plane_state *plane_state;
6901         int ret, i;
6902
6903         ret = drm_atomic_add_affected_planes(state, crtc);
6904         if (ret)
6905                 return ret;
6906
6907         for_each_new_plane_in_state(state, plane, plane_state, i) {
6908                 if (plane_state->crtc != crtc)
6909                         continue;
6910
6911                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6912                 if (ret)
6913                         return ret;
6914
6915                 drm_atomic_set_fb_for_plane(plane_state, NULL);
6916         }
6917
6918         return 0;
6919 }
6920
/*
 * intel_get_load_detect_pipe - acquire and light up a pipe for load detection
 * @connector: the connector being probed
 * @old: storage for the restore state consumed later by
 *	 intel_release_load_detect_pipe()
 * @ctx: modeset locking context
 *
 * Picks a CRTC (the connector's current one if it has one, otherwise the
 * first unused CRTC the encoder can drive) and commits the fixed VESA
 * 640x480 load_detect_mode on it with all planes disabled. A duplicated
 * copy of the pre-existing state is stashed in @old->restore_state so the
 * caller can undo this afterwards.
 *
 * Return value is a bool-in-int: true (1) if the pipe was acquired and
 * programmed, false (0) on ordinary failure, or -EDEADLK when the caller
 * must back off @ctx and retry.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip CRTCs this encoder cannot be routed to. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to apply, one to duplicate the old state into. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Planes must be off so only the test pattern/signal is driven. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Ownership of restore_state passes to the caller via @old. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must propagate so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
7079
7080 void intel_release_load_detect_pipe(struct drm_connector *connector,
7081                                     struct intel_load_detect_pipe *old,
7082                                     struct drm_modeset_acquire_ctx *ctx)
7083 {
7084         struct intel_encoder *intel_encoder =
7085                 intel_attached_encoder(to_intel_connector(connector));
7086         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
7087         struct drm_encoder *encoder = &intel_encoder->base;
7088         struct drm_atomic_state *state = old->restore_state;
7089         int ret;
7090
7091         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7092                     connector->base.id, connector->name,
7093                     encoder->base.id, encoder->name);
7094
7095         if (!state)
7096                 return;
7097
7098         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
7099         if (ret)
7100                 drm_dbg_kms(&i915->drm,
7101                             "Couldn't release load detect pipe: %i\n", ret);
7102         drm_atomic_state_put(state);
7103 }
7104
7105 static int i9xx_pll_refclk(struct drm_device *dev,
7106                            const struct intel_crtc_state *pipe_config)
7107 {
7108         struct drm_i915_private *dev_priv = to_i915(dev);
7109         u32 dpll = pipe_config->dpll_hw_state.dpll;
7110
7111         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7112                 return dev_priv->vbt.lvds_ssc_freq;
7113         else if (HAS_PCH_SPLIT(dev_priv))
7114                 return 120000;
7115         else if (!IS_GEN(dev_priv, 2))
7116                 return 96000;
7117         else
7118                 return 48000;
7119 }
7120
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* The DPLL selects which of the two FP divisor registers is live. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M1/N/M2 from FP; Pineview uses different field layouts. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored as a one-hot bitmask; ffs() recovers the value. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* GEN2: i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
7212
7213 int intel_dotclock_calculate(int link_freq,
7214                              const struct intel_link_m_n *m_n)
7215 {
7216         /*
7217          * The calculation for the data clock is:
7218          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7219          * But we want to avoid losing precison if possible, so:
7220          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7221          *
7222          * and the link clock is simpler:
7223          * link_clock = (m * link_clock) / n
7224          */
7225
7226         if (!m_n->link_n)
7227                 return 0;
7228
7229         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
7230 }
7231
7232 static void ilk_pch_clock_get(struct intel_crtc *crtc,
7233                               struct intel_crtc_state *pipe_config)
7234 {
7235         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7236
7237         /* read out port_clock from the DPLL */
7238         i9xx_crtc_clock_get(crtc, pipe_config);
7239
7240         /*
7241          * In case there is an active pipe without active ports,
7242          * we may need some idea for the dotclock anyway.
7243          * Calculate one based on the FDI configuration.
7244          */
7245         pipe_config->hw.adjusted_mode.crtc_clock =
7246                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
7247                                          &pipe_config->fdi_m_n);
7248 }
7249
7250 /* Returns the currently programmed mode of the given encoder. */
7251 struct drm_display_mode *
7252 intel_encoder_current_mode(struct intel_encoder *encoder)
7253 {
7254         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
7255         struct intel_crtc_state *crtc_state;
7256         struct drm_display_mode *mode;
7257         struct intel_crtc *crtc;
7258         enum pipe pipe;
7259
7260         if (!encoder->get_hw_state(encoder, &pipe))
7261                 return NULL;
7262
7263         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7264
7265         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7266         if (!mode)
7267                 return NULL;
7268
7269         crtc_state = intel_crtc_state_alloc(crtc);
7270         if (!crtc_state) {
7271                 kfree(mode);
7272                 return NULL;
7273         }
7274
7275         if (!intel_crtc_get_pipe_config(crtc_state)) {
7276                 kfree(crtc_state);
7277                 kfree(mode);
7278                 return NULL;
7279         }
7280
7281         intel_encoder_get_config(encoder, crtc_state);
7282
7283         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
7284
7285         kfree(crtc_state);
7286
7287         return mode;
7288 }
7289
7290 /**
7291  * intel_wm_need_update - Check whether watermarks need updating
7292  * @cur: current plane state
7293  * @new: new plane state
7294  *
7295  * Check current plane state versus the new one to determine whether
7296  * watermarks need to be recalculated.
7297  *
7298  * Returns true or false.
7299  */
7300 static bool intel_wm_need_update(const struct intel_plane_state *cur,
7301                                  struct intel_plane_state *new)
7302 {
7303         /* Update watermarks on tiling or size changes. */
7304         if (new->uapi.visible != cur->uapi.visible)
7305                 return true;
7306
7307         if (!cur->hw.fb || !new->hw.fb)
7308                 return false;
7309
7310         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
7311             cur->hw.rotation != new->hw.rotation ||
7312             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
7313             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
7314             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
7315             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
7316                 return true;
7317
7318         return false;
7319 }
7320
7321 static bool needs_scaling(const struct intel_plane_state *state)
7322 {
7323         int src_w = drm_rect_width(&state->uapi.src) >> 16;
7324         int src_h = drm_rect_height(&state->uapi.src) >> 16;
7325         int dst_w = drm_rect_width(&state->uapi.dst);
7326         int dst_h = drm_rect_height(&state->uapi.dst);
7327
7328         return (src_w != dst_w || src_h != dst_h);
7329 }
7330
/*
 * Compute the CRTC-level consequences of @plane_state replacing
 * @old_plane_state: scaler setup (GEN9+), pre/post watermark update
 * flags, cxsr/LP-watermark disables around plane on/off transitions,
 * and frontbuffer tracking bits.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* GEN9+ non-cursor planes may need a pipe scaler assigned/released. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive CRTC. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		/* Pre-GEN5 (except G4X) needs an explicit pre-update WM pass. */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
7452
7453 static bool encoders_cloneable(const struct intel_encoder *a,
7454                                const struct intel_encoder *b)
7455 {
7456         /* masks could be asymmetric, so check both ways */
7457         return a == b || (a->cloneable & (1 << b->type) &&
7458                           b->cloneable & (1 << a->type));
7459 }
7460
7461 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
7462                                          struct intel_crtc *crtc,
7463                                          struct intel_encoder *encoder)
7464 {
7465         struct intel_encoder *source_encoder;
7466         struct drm_connector *connector;
7467         struct drm_connector_state *connector_state;
7468         int i;
7469
7470         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7471                 if (connector_state->crtc != &crtc->base)
7472                         continue;
7473
7474                 source_encoder =
7475                         to_intel_encoder(connector_state->best_encoder);
7476                 if (!encoders_cloneable(encoder, source_encoder))
7477                         return false;
7478         }
7479
7480         return true;
7481 }
7482
7483 static int icl_add_linked_planes(struct intel_atomic_state *state)
7484 {
7485         struct intel_plane *plane, *linked;
7486         struct intel_plane_state *plane_state, *linked_plane_state;
7487         int i;
7488
7489         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7490                 linked = plane_state->planar_linked_plane;
7491
7492                 if (!linked)
7493                         continue;
7494
7495                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
7496                 if (IS_ERR(linked_plane_state))
7497                         return PTR_ERR(linked_plane_state);
7498
7499                 drm_WARN_ON(state->base.dev,
7500                             linked_plane_state->planar_linked_plane != plane);
7501                 drm_WARN_ON(state->base.dev,
7502                             linked_plane_state->planar_slave == plane_state->planar_slave);
7503         }
7504
7505         return 0;
7506 }
7507
/*
 * icl_check_nv12_planes - (re)establish master/slave plane links for planar YUV
 *
 * On gen11+ a planar (NV12 style) framebuffer needs a second, Y-capable
 * plane to scan out the Y component.  Tear down all existing plane links
 * for this crtc, then pair every plane in crtc_state->nv12_planes with a
 * currently unused Y-capable plane and copy the relevant hw parameters to
 * that slave plane.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or a
 * negative error code from intel_atomic_get_plane_state().
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+ hardware */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			/* Former slave with no visible fb of its own: hide it */
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* Nothing planar on this crtc -> no links needed */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free (inactive) Y-capable plane on this crtc */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* Tell the HDR plane which plane carries its Y data */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
7607
7608 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7609 {
7610         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7611         struct intel_atomic_state *state =
7612                 to_intel_atomic_state(new_crtc_state->uapi.state);
7613         const struct intel_crtc_state *old_crtc_state =
7614                 intel_atomic_get_old_crtc_state(state, crtc);
7615
7616         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7617 }
7618
7619 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7620 {
7621         const struct drm_display_mode *pipe_mode =
7622                 &crtc_state->hw.pipe_mode;
7623         int linetime_wm;
7624
7625         if (!crtc_state->hw.enable)
7626                 return 0;
7627
7628         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7629                                         pipe_mode->crtc_clock);
7630
7631         return min(linetime_wm, 0x1ff);
7632 }
7633
7634 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7635                                const struct intel_cdclk_state *cdclk_state)
7636 {
7637         const struct drm_display_mode *pipe_mode =
7638                 &crtc_state->hw.pipe_mode;
7639         int linetime_wm;
7640
7641         if (!crtc_state->hw.enable)
7642                 return 0;
7643
7644         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7645                                         cdclk_state->logical.cdclk);
7646
7647         return min(linetime_wm, 0x1ff);
7648 }
7649
7650 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7651 {
7652         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7653         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7654         const struct drm_display_mode *pipe_mode =
7655                 &crtc_state->hw.pipe_mode;
7656         int linetime_wm;
7657
7658         if (!crtc_state->hw.enable)
7659                 return 0;
7660
7661         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7662                                    crtc_state->pixel_rate);
7663
7664         /* Display WA #1135: BXT:ALL GLK:ALL */
7665         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
7666                 linetime_wm /= 2;
7667
7668         return min(linetime_wm, 0x1ff);
7669 }
7670
7671 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7672                                    struct intel_crtc *crtc)
7673 {
7674         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7675         struct intel_crtc_state *crtc_state =
7676                 intel_atomic_get_new_crtc_state(state, crtc);
7677         const struct intel_cdclk_state *cdclk_state;
7678
7679         if (INTEL_GEN(dev_priv) >= 9)
7680                 crtc_state->linetime = skl_linetime_wm(crtc_state);
7681         else
7682                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
7683
7684         if (!hsw_crtc_supports_ips(crtc))
7685                 return 0;
7686
7687         cdclk_state = intel_atomic_get_cdclk_state(state);
7688         if (IS_ERR(cdclk_state))
7689                 return PTR_ERR(cdclk_state);
7690
7691         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
7692                                                        cdclk_state);
7693
7694         return 0;
7695 }
7696
/*
 * intel_crtc_atomic_check - per-crtc atomic state validation/computation
 *
 * Runs the per-crtc portion of atomic check: clock/PLL computation,
 * color management, watermarks, scalers, IPS, linetime and PSR2
 * selective fetch.  The order of the checks matters; later steps
 * consume values computed by earlier ones.
 *
 * Returns 0 on success or a negative error code to fail the commit.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/*
	 * On gen2-4 (except g4x), flag a post-commit watermark update
	 * when a modeset deactivates the crtc.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/*
	 * Compute a new clock/PLL config for an enabled crtc on a full
	 * modeset.  Bigjoiner slaves inherit theirs from the master, and
	 * a still-assigned shared DPLL at this point is a driver bug.
	 */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !crtc_state->bigjoiner_slave &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	/* Target ("optimal") watermarks for the new state */
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm needs the pipe wm computed above */
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* Pipe/plane scaler assignment only exists on gen9+ */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	/* PSR2 selective fetch is only (re)computed on fastsets */
	if (!mode_changed) {
		ret = intel_psr2_sel_fetch_update(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}
7794
/*
 * Bring every connector's atomic state in sync with the encoder/crtc
 * it is currently attached to, fixing up best_encoder, crtc, max_bpc
 * and the connector reference counts along the way.
 *
 * NOTE(review): presumably used after hardware state readout/takeover,
 * where connector->state may not match reality - confirm against callers.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/* Drop the reference held for the stale crtc link */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

		if (encoder) {
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* pipe_bpp / 3 components; default to 8 bpc if unset */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			/* New crtc link holds a connector reference */
			drm_connector_get(&connector->base);
		} else {
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
7827
7828 static int
7829 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7830                       struct intel_crtc_state *pipe_config)
7831 {
7832         struct drm_connector *connector = conn_state->connector;
7833         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7834         const struct drm_display_info *info = &connector->display_info;
7835         int bpp;
7836
7837         switch (conn_state->max_bpc) {
7838         case 6 ... 7:
7839                 bpp = 6 * 3;
7840                 break;
7841         case 8 ... 9:
7842                 bpp = 8 * 3;
7843                 break;
7844         case 10 ... 11:
7845                 bpp = 10 * 3;
7846                 break;
7847         case 12 ... 16:
7848                 bpp = 12 * 3;
7849                 break;
7850         default:
7851                 MISSING_CASE(conn_state->max_bpc);
7852                 return -EINVAL;
7853         }
7854
7855         if (bpp < pipe_config->pipe_bpp) {
7856                 drm_dbg_kms(&i915->drm,
7857                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7858                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7859                             connector->base.id, connector->name,
7860                             bpp, 3 * info->bpc,
7861                             3 * conn_state->max_requested_bpc,
7862                             pipe_config->pipe_bpp);
7863
7864                 pipe_config->pipe_bpp = bpp;
7865         }
7866
7867         return 0;
7868 }
7869
7870 static int
7871 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7872                           struct intel_crtc_state *pipe_config)
7873 {
7874         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7875         struct drm_atomic_state *state = pipe_config->uapi.state;
7876         struct drm_connector *connector;
7877         struct drm_connector_state *connector_state;
7878         int bpp, i;
7879
7880         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7881             IS_CHERRYVIEW(dev_priv)))
7882                 bpp = 10*3;
7883         else if (INTEL_GEN(dev_priv) >= 5)
7884                 bpp = 12*3;
7885         else
7886                 bpp = 8*3;
7887
7888         pipe_config->pipe_bpp = bpp;
7889
7890         /* Clamp display bpp to connector max bpp */
7891         for_each_new_connector_in_state(state, connector, connector_state, i) {
7892                 int ret;
7893
7894                 if (connector_state->crtc != &crtc->base)
7895                         continue;
7896
7897                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7898                 if (ret)
7899                         return ret;
7900         }
7901
7902         return 0;
7903 }
7904
/* Dump the crtc_* (hardware) timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
7917
/*
 * Dump one link M/N configuration (FDI or DP) to the KMS debug log,
 * tagged with @id so multiple configs for the same pipe can be told apart.
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
7931
7932 static void
7933 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7934                      const union hdmi_infoframe *frame)
7935 {
7936         if (!drm_debug_enabled(DRM_UT_KMS))
7937                 return;
7938
7939         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7940 }
7941
7942 static void
7943 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7944                       const struct drm_dp_vsc_sdp *vsc)
7945 {
7946         if (!drm_debug_enabled(DRM_UT_KMS))
7947                 return;
7948
7949         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7950 }
7951
/* Printable names for the INTEL_OUTPUT_* enum, indexed by enum value. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
7970
/*
 * Format the set bits of @output_types as a comma separated list of
 * output type names into @buf, writing at most @len bytes.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* snprintf reports the would-be length; stop on truncation */
		if (r >= len)
			break;
		str += r;
		len -= r;

		/* bits are cleared as they are printed ... */
		output_types &= ~BIT(i);
	}

	/* ... so this fires on truncation or on bits with no name above */
	WARN_ON_ONCE(output_types != 0);
}
7997
7998 static const char * const output_format_str[] = {
7999         [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
8000         [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
8001         [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
8002 };
8003
8004 static const char *output_formats(enum intel_output_format format)
8005 {
8006         if (format >= ARRAY_SIZE(output_format_str))
8007                 return "invalid";
8008         return output_format_str[format];
8009 }
8010
/*
 * Dump one plane's atomic state (fb, format, rotation, scaler, src/dst
 * rectangles) to the KMS debug log.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: nothing more than visibility to report */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src is 16.16 fixed point, dst is integer pixels */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
8040
/*
 * intel_dump_pipe_config - dump a full crtc state to the KMS debug log
 * @pipe_config: the crtc state to dump
 * @state: the atomic state the crtc belongs to, or NULL to skip planes
 * @context: free-form tag printed with the header line
 *
 * For a disabled crtc only the header and (if @state is given) the plane
 * states are printed; otherwise every major piece of the computed state
 * is logged.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
		    enableddisabled(pipe_config->splitter.enable),
		    pipe_config->splitter.link_count,
		    pipe_config->splitter.pixel_overlap);

	/* Link M/N values: FDI for PCH encoders, main + DRRS for DP */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* Dump each infoframe/SDP the state says is enabled */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): GAMUT_METADATA dumps infoframes.drm as well -
	 * looks like the HDR metadata lives in .drm regardless of which
	 * packet type carries it, but confirm this is not a copy/paste
	 * of the DRM infoframe case above.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* Panel fitter state: GMCH registers vs PCH pfit rectangle */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV uses the CGM block instead of the CSC mode register */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
8205
/*
 * check_digital_port_conflicts - reject impossible digital port sharing
 *
 * Returns false if the same digital port would be used by more than one
 * SST/HDMI stream, or by both an MST and an SST/HDMI stream, in the
 * state that @state would commit; true otherwise.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state in @state, fall back to the current one */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
8274
8275 static void
8276 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
8277                                            struct intel_crtc_state *crtc_state)
8278 {
8279         const struct intel_crtc_state *from_crtc_state = crtc_state;
8280
8281         if (crtc_state->bigjoiner_slave) {
8282                 from_crtc_state = intel_atomic_get_new_crtc_state(state,
8283                                                                   crtc_state->bigjoiner_linked_crtc);
8284
8285                 /* No need to copy state if the master state is unchanged */
8286                 if (!from_crtc_state)
8287                         return;
8288         }
8289
8290         intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
8291 }
8292
/*
 * Seed the hw (hardware-programming) state from the uapi
 * (userspace-visible) state: enable/active flags, modes and the
 * scaling filter are copied directly; the color blobs are pulled in
 * via intel_crtc_copy_uapi_to_hw_state_nomodeset(), which sources them
 * from the master crtc when this crtc is a bigjoiner slave.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
8305
/*
 * Propagate the hw state back into the uapi state (the reverse of
 * intel_crtc_copy_uapi_to_hw_state()). A bigjoiner slave has no uapi
 * state of its own, so it is skipped entirely.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Setting the uapi mode from an already-valid hw mode should never fail */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
8327
/*
 * Initialize a bigjoiner slave's crtc state by cloning the master's
 * state (@from_crtc_state) and then fixing up the pieces that must
 * remain per-pipe on the slave.
 *
 * Returns 0 on success or -ENOMEM if the temporary copy cannot be
 * allocated.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/* Build the new state in a scratch copy of the master's state */
	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* Preserve the slave's own identity and per-pipe resources */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* Free old hw-state blobs before they are overwritten by the memcpy */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	/* Link back to the master and mark this state as the slave's */
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	/* has_audio is forced off on the slave */
	crtc_state->has_audio = false;

	return 0;
}
8368
/*
 * Reset @crtc_state to a freshly-allocated state before recomputing
 * the pipe config, while preserving the handful of fields that must
 * survive the recomputation (uapi state, pll selections, scaler state,
 * crc enable, and watermarks on some platforms). Finishes by seeding
 * the hw state from the uapi state.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* NOTE(review): wm state is preserved only on G4X/VLV/CHV — confirm
	 * this matches how watermarks are tracked on those platforms. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
8407
/*
 * Compute the full pipe configuration for a crtc: sanitize sync
 * polarity flags, pick a baseline pipe bpp, collect the output types,
 * then let each encoder's .compute_config() hook and
 * intel_crtc_compute_config() adjust the state — retrying the encoder
 * pass once if the crtc asks for it via I915_DISPLAY_CONFIG_RETRY.
 *
 * Returns 0 on success, -EDEADLK on modeset lock contention (caller
 * must back off and retry), or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the starting bpp; only used for the debug print below */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK just means "drop locks and retry"; stay quiet */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/*
	 * I915_DISPLAY_CONFIG_RETRY asks for one more pass through the
	 * encoder hooks; a second request in a row is a driver bug
	 * (config computation would loop forever).
	 */
	if (ret == I915_DISPLAY_CONFIG_RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
8546
8547 static int
8548 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
8549 {
8550         struct intel_atomic_state *state =
8551                 to_intel_atomic_state(crtc_state->uapi.state);
8552         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8553         struct drm_connector_state *conn_state;
8554         struct drm_connector *connector;
8555         int i;
8556
8557         for_each_new_connector_in_state(&state->base, connector,
8558                                         conn_state, i) {
8559                 struct intel_encoder *encoder =
8560                         to_intel_encoder(conn_state->best_encoder);
8561                 int ret;
8562
8563                 if (conn_state->crtc != &crtc->base ||
8564                     !encoder->compute_config_late)
8565                         continue;
8566
8567                 ret = encoder->compute_config_late(encoder, crtc_state,
8568                                                    conn_state);
8569                 if (ret)
8570                         return ret;
8571         }
8572
8573         return 0;
8574 }
8575
/*
 * Fuzzy clock comparison: two clocks match when they are identical, or
 * when both are non-zero and ((|c1 - c2| + c1 + c2) * 100) / (c1 + c2)
 * stays below 105 — i.e. they differ by less than ~5% of their sum.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	return (delta + sum) * 100 / sum < 105;
}
8593
/*
 * Compare two m/n link ratios. In @exact mode the values must match
 * bit-for-bit; otherwise the side with the smaller n is scaled up by
 * powers of two until the n values meet (so only ratios whose n differ
 * by a power of two can match), after which the m values are compared
 * with ~5% fuzz via intel_fuzzy_clock_check().
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	/* A zero component can never fuzzily match a non-zero one */
	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* Values fit in an int, so passing them to the int-based fuzzy
	 * clock check below cannot overflow/wrap. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	/* Double the smaller-n side (m and n together, preserving the
	 * ratio) until the n values align. */
	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* Doubling overshot: the n values did not differ by a power of two */
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
8624
8625 static bool
8626 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8627                        const struct intel_link_m_n *m2_n2,
8628                        bool exact)
8629 {
8630         return m_n->tu == m2_n2->tu &&
8631                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8632                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8633                 intel_compare_m_n(m_n->link_m, m_n->link_n,
8634                                   m2_n2->link_m, m2_n2->link_n, exact);
8635 }
8636
8637 static bool
8638 intel_compare_infoframe(const union hdmi_infoframe *a,
8639                         const union hdmi_infoframe *b)
8640 {
8641         return memcmp(a, b, sizeof(*a)) == 0;
8642 }
8643
8644 static bool
8645 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8646                          const struct drm_dp_vsc_sdp *b)
8647 {
8648         return memcmp(a, b, sizeof(*a)) == 0;
8649 }
8650
8651 static void
8652 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8653                                bool fastset, const char *name,
8654                                const union hdmi_infoframe *a,
8655                                const union hdmi_infoframe *b)
8656 {
8657         if (fastset) {
8658                 if (!drm_debug_enabled(DRM_UT_KMS))
8659                         return;
8660
8661                 drm_dbg_kms(&dev_priv->drm,
8662                             "fastset mismatch in %s infoframe\n", name);
8663                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8664                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8665                 drm_dbg_kms(&dev_priv->drm, "found:\n");
8666                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8667         } else {
8668                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8669                 drm_err(&dev_priv->drm, "expected:\n");
8670                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8671                 drm_err(&dev_priv->drm, "found:\n");
8672                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8673         }
8674 }
8675
8676 static void
8677 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8678                                 bool fastset, const char *name,
8679                                 const struct drm_dp_vsc_sdp *a,
8680                                 const struct drm_dp_vsc_sdp *b)
8681 {
8682         if (fastset) {
8683                 if (!drm_debug_enabled(DRM_UT_KMS))
8684                         return;
8685
8686                 drm_dbg_kms(&dev_priv->drm,
8687                             "fastset mismatch in %s dp sdp\n", name);
8688                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
8689                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8690                 drm_dbg_kms(&dev_priv->drm, "found:\n");
8691                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8692         } else {
8693                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8694                 drm_err(&dev_priv->drm, "expected:\n");
8695                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8696                 drm_err(&dev_priv->drm, "found:\n");
8697                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8698         }
8699 }
8700
/*
 * Report a single pipe config field mismatch between the expected and
 * found values. Fastset mismatches go to the KMS debug log; all others
 * are logged as errors.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	/* %pV expands the caller-supplied format/args in place */
	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
8723
8724 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8725 {
8726         if (dev_priv->params.fastboot != -1)
8727                 return dev_priv->params.fastboot;
8728
8729         /* Enable fastboot by default on Skylake and newer */
8730         if (INTEL_GEN(dev_priv) >= 9)
8731                 return true;
8732
8733         /* Enable fastboot by default on VLV and CHV */
8734         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8735                 return true;
8736
8737         /* Disabled by default on all others */
8738         return false;
8739 }
8740
8741 static bool
8742 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8743                           const struct intel_crtc_state *pipe_config,
8744                           bool fastset)
8745 {
8746         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8747         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8748         bool ret = true;
8749         u32 bp_gamma = 0;
8750         bool fixup_inherited = fastset &&
8751                 current_config->inherited && !pipe_config->inherited;
8752
8753         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8754                 drm_dbg_kms(&dev_priv->drm,
8755                             "initial modeset and fastboot not set\n");
8756                 ret = false;
8757         }
8758
8759 #define PIPE_CONF_CHECK_X(name) do { \
8760         if (current_config->name != pipe_config->name) { \
8761                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8762                                      "(expected 0x%08x, found 0x%08x)", \
8763                                      current_config->name, \
8764                                      pipe_config->name); \
8765                 ret = false; \
8766         } \
8767 } while (0)
8768
8769 #define PIPE_CONF_CHECK_I(name) do { \
8770         if (current_config->name != pipe_config->name) { \
8771                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8772                                      "(expected %i, found %i)", \
8773                                      current_config->name, \
8774                                      pipe_config->name); \
8775                 ret = false; \
8776         } \
8777 } while (0)
8778
8779 #define PIPE_CONF_CHECK_BOOL(name) do { \
8780         if (current_config->name != pipe_config->name) { \
8781                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
8782                                      "(expected %s, found %s)", \
8783                                      yesno(current_config->name), \
8784                                      yesno(pipe_config->name)); \
8785                 ret = false; \
8786         } \
8787 } while (0)
8788
8789 /*
8790  * Checks state where we only read out the enabling, but not the entire
8791  * state itself (like full infoframes or ELD for audio). These states
8792  * require a full modeset on bootup to fix up.
8793  */
8794 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8795         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8796                 PIPE_CONF_CHECK_BOOL(name); \
8797         } else { \
8798                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8799                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8800                                      yesno(current_config->name), \
8801                                      yesno(pipe_config->name)); \
8802                 ret = false; \
8803         } \
8804 } while (0)
8805
8806 #define PIPE_CONF_CHECK_P(name) do { \
8807         if (current_config->name != pipe_config->name) { \
8808                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8809                                      "(expected %p, found %p)", \
8810                                      current_config->name, \
8811                                      pipe_config->name); \
8812                 ret = false; \
8813         } \
8814 } while (0)
8815
8816 #define PIPE_CONF_CHECK_M_N(name) do { \
8817         if (!intel_compare_link_m_n(&current_config->name, \
8818                                     &pipe_config->name,\
8819                                     !fastset)) { \
8820                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8821                                      "(expected tu %i gmch %i/%i link %i/%i, " \
8822                                      "found tu %i, gmch %i/%i link %i/%i)", \
8823                                      current_config->name.tu, \
8824                                      current_config->name.gmch_m, \
8825                                      current_config->name.gmch_n, \
8826                                      current_config->name.link_m, \
8827                                      current_config->name.link_n, \
8828                                      pipe_config->name.tu, \
8829                                      pipe_config->name.gmch_m, \
8830                                      pipe_config->name.gmch_n, \
8831                                      pipe_config->name.link_m, \
8832                                      pipe_config->name.link_n); \
8833                 ret = false; \
8834         } \
8835 } while (0)
8836
8837 /* This is required for BDW+ where there is only one set of registers for
8838  * switching between high and low RR.
8839  * This macro can be used whenever a comparison has to be made between one
8840  * hw state and multiple sw state variables.
8841  */
8842 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8843         if (!intel_compare_link_m_n(&current_config->name, \
8844                                     &pipe_config->name, !fastset) && \
8845             !intel_compare_link_m_n(&current_config->alt_name, \
8846                                     &pipe_config->name, !fastset)) { \
8847                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8848                                      "(expected tu %i gmch %i/%i link %i/%i, " \
8849                                      "or tu %i gmch %i/%i link %i/%i, " \
8850                                      "found tu %i, gmch %i/%i link %i/%i)", \
8851                                      current_config->name.tu, \
8852                                      current_config->name.gmch_m, \
8853                                      current_config->name.gmch_n, \
8854                                      current_config->name.link_m, \
8855                                      current_config->name.link_n, \
8856                                      current_config->alt_name.tu, \
8857                                      current_config->alt_name.gmch_m, \
8858                                      current_config->alt_name.gmch_n, \
8859                                      current_config->alt_name.link_m, \
8860                                      current_config->alt_name.link_n, \
8861                                      pipe_config->name.tu, \
8862                                      pipe_config->name.gmch_m, \
8863                                      pipe_config->name.gmch_n, \
8864                                      pipe_config->name.link_m, \
8865                                      pipe_config->name.link_n); \
8866                 ret = false; \
8867         } \
8868 } while (0)
8869
8870 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8871         if ((current_config->name ^ pipe_config->name) & (mask)) { \
8872                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8873                                      "(%x) (expected %i, found %i)", \
8874                                      (mask), \
8875                                      current_config->name & (mask), \
8876                                      pipe_config->name & (mask)); \
8877                 ret = false; \
8878         } \
8879 } while (0)
8880
8881 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8882         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8883                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
8884                                      "(expected %i, found %i)", \
8885                                      current_config->name, \
8886                                      pipe_config->name); \
8887                 ret = false; \
8888         } \
8889 } while (0)
8890
8891 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8892         if (!intel_compare_infoframe(&current_config->infoframes.name, \
8893                                      &pipe_config->infoframes.name)) { \
8894                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8895                                                &current_config->infoframes.name, \
8896                                                &pipe_config->infoframes.name); \
8897                 ret = false; \
8898         } \
8899 } while (0)
8900
8901 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8902         if (!current_config->has_psr && !pipe_config->has_psr && \
8903             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8904                                       &pipe_config->infoframes.name)) { \
8905                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8906                                                 &current_config->infoframes.name, \
8907                                                 &pipe_config->infoframes.name); \
8908                 ret = false; \
8909         } \
8910 } while (0)
8911
8912 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8913         if (current_config->name1 != pipe_config->name1) { \
8914                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8915                                 "(expected %i, found %i, won't compare lut values)", \
8916                                 current_config->name1, \
8917                                 pipe_config->name1); \
8918                 ret = false;\
8919         } else { \
8920                 if (!intel_color_lut_equal(current_config->name2, \
8921                                         pipe_config->name2, pipe_config->name1, \
8922                                         bit_precision)) { \
8923                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8924                                         "hw_state doesn't match sw_state"); \
8925                         ret = false; \
8926                 } \
8927         } \
8928 } while (0)
8929
8930 #define PIPE_CONF_QUIRK(quirk) \
8931         ((current_config->quirks | pipe_config->quirks) & (quirk))
8932
8933         PIPE_CONF_CHECK_I(cpu_transcoder);
8934
8935         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8936         PIPE_CONF_CHECK_I(fdi_lanes);
8937         PIPE_CONF_CHECK_M_N(fdi_m_n);
8938
8939         PIPE_CONF_CHECK_I(lane_count);
8940         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8941
8942         if (INTEL_GEN(dev_priv) < 8) {
8943                 PIPE_CONF_CHECK_M_N(dp_m_n);
8944
8945                 if (current_config->has_drrs)
8946                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
8947         } else
8948                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8949
8950         PIPE_CONF_CHECK_X(output_types);
8951
8952         /* FIXME do the readout properly and get rid of this quirk */
8953         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8954                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8955                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8956                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8957                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8958                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8959                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8960
8961                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8962                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8963                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8964                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8965                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8966                 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8967
8968                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8969                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8970                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8971                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8972                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8973                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8974
8975                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8976                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8977                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8978                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8979                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8980                 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8981
8982                 PIPE_CONF_CHECK_I(pixel_multiplier);
8983
8984                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8985                                       DRM_MODE_FLAG_INTERLACE);
8986
8987                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8988                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8989                                               DRM_MODE_FLAG_PHSYNC);
8990                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8991                                               DRM_MODE_FLAG_NHSYNC);
8992                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8993                                               DRM_MODE_FLAG_PVSYNC);
8994                         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8995                                               DRM_MODE_FLAG_NVSYNC);
8996                 }
8997         }
8998
8999         PIPE_CONF_CHECK_I(output_format);
9000         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
9001         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
9002             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
9003                 PIPE_CONF_CHECK_BOOL(limited_color_range);
9004
9005         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
9006         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
9007         PIPE_CONF_CHECK_BOOL(has_infoframe);
9008         /* FIXME do the readout properly and get rid of this quirk */
9009         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9010                 PIPE_CONF_CHECK_BOOL(fec_enable);
9011
9012         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
9013
9014         PIPE_CONF_CHECK_X(gmch_pfit.control);
9015         /* pfit ratios are autocomputed by the hw on gen4+ */
9016         if (INTEL_GEN(dev_priv) < 4)
9017                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
9018         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9019
9020         /*
9021          * Changing the EDP transcoder input mux
9022          * (A_ONOFF vs. A_ON) requires a full modeset.
9023          */
9024         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
9025
9026         if (!fastset) {
9027                 PIPE_CONF_CHECK_I(pipe_src_w);
9028                 PIPE_CONF_CHECK_I(pipe_src_h);
9029
9030                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
9031                 if (current_config->pch_pfit.enabled) {
9032                         PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
9033                         PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
9034                         PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
9035                         PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
9036                 }
9037
9038                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
9039                 /* FIXME do the readout properly and get rid of this quirk */
9040                 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
9041                         PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
9042
9043                 PIPE_CONF_CHECK_X(gamma_mode);
9044                 if (IS_CHERRYVIEW(dev_priv))
9045                         PIPE_CONF_CHECK_X(cgm_mode);
9046                 else
9047                         PIPE_CONF_CHECK_X(csc_mode);
9048                 PIPE_CONF_CHECK_BOOL(gamma_enable);
9049                 PIPE_CONF_CHECK_BOOL(csc_enable);
9050
9051                 PIPE_CONF_CHECK_I(linetime);
9052                 PIPE_CONF_CHECK_I(ips_linetime);
9053
9054                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
9055                 if (bp_gamma)
9056                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
9057         }
9058
9059         PIPE_CONF_CHECK_BOOL(double_wide);
9060
9061         PIPE_CONF_CHECK_P(shared_dpll);
9062
9063         /* FIXME do the readout properly and get rid of this quirk */
9064         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
9065                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
9066                 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
9067                 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
9068                 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
9069                 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
9070                 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
9071                 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
9072                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
9073                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
9074                 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
9075                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
9076                 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
9077                 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
9078                 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
9079                 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
9080                 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
9081                 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
9082                 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
9083                 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
9084                 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
9085                 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
9086                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
9087                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
9088                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
9089                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
9090                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
9091                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
9092                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
9093                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
9094                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
9095                 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
9096
9097                 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
9098                 PIPE_CONF_CHECK_X(dsi_pll.div);
9099
9100                 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
9101                         PIPE_CONF_CHECK_I(pipe_bpp);
9102
9103                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
9104                 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
9105                 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9106
9107                 PIPE_CONF_CHECK_I(min_voltage_level);
9108         }
9109
9110         PIPE_CONF_CHECK_X(infoframes.enable);
9111         PIPE_CONF_CHECK_X(infoframes.gcp);
9112         PIPE_CONF_CHECK_INFOFRAME(avi);
9113         PIPE_CONF_CHECK_INFOFRAME(spd);
9114         PIPE_CONF_CHECK_INFOFRAME(hdmi);
9115         PIPE_CONF_CHECK_INFOFRAME(drm);
9116         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
9117
9118         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
9119         PIPE_CONF_CHECK_I(master_transcoder);
9120         PIPE_CONF_CHECK_BOOL(bigjoiner);
9121         PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
9122         PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
9123
9124         PIPE_CONF_CHECK_I(dsc.compression_enable);
9125         PIPE_CONF_CHECK_I(dsc.dsc_split);
9126         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
9127
9128         PIPE_CONF_CHECK_BOOL(splitter.enable);
9129         PIPE_CONF_CHECK_I(splitter.link_count);
9130         PIPE_CONF_CHECK_I(splitter.pixel_overlap);
9131
9132         PIPE_CONF_CHECK_I(mst_master_transcoder);
9133
9134         PIPE_CONF_CHECK_BOOL(vrr.enable);
9135         PIPE_CONF_CHECK_I(vrr.vmin);
9136         PIPE_CONF_CHECK_I(vrr.vmax);
9137         PIPE_CONF_CHECK_I(vrr.flipline);
9138         PIPE_CONF_CHECK_I(vrr.pipeline_full);
9139
9140 #undef PIPE_CONF_CHECK_X
9141 #undef PIPE_CONF_CHECK_I
9142 #undef PIPE_CONF_CHECK_BOOL
9143 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
9144 #undef PIPE_CONF_CHECK_P
9145 #undef PIPE_CONF_CHECK_FLAGS
9146 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
9147 #undef PIPE_CONF_CHECK_COLOR_LUT
9148 #undef PIPE_CONF_QUIRK
9149
9150         return ret;
9151 }
9152
9153 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
9154                                            const struct intel_crtc_state *pipe_config)
9155 {
9156         if (pipe_config->has_pch_encoder) {
9157                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
9158                                                             &pipe_config->fdi_m_n);
9159                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
9160
9161                 /*
9162                  * FDI already provided one idea for the dotclock.
9163                  * Yell if the encoder disagrees.
9164                  */
9165                 drm_WARN(&dev_priv->drm,
9166                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
9167                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9168                          fdi_dotclock, dotclock);
9169         }
9170 }
9171
/*
 * Compare the software-computed SKL+ watermark and DDB allocation state
 * for @crtc against what was actually programmed into the hardware,
 * reporting any mismatch via drm_err(). No-op on pre-gen9 platforms or
 * inactive pipes.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Heap-allocated scratch for the hw readout (large arrays). */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return; /* verification is best-effort; skip on OOM */

	/* Read the watermarks and DDB entries the hardware is using. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* DBUF slice tracking is only checked on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* Transition watermark is tracked separately from WM0..N. */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane->id];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
9259
/*
 * Verify every connector in @state that is bound to @crtc: its crtc
 * state must be consistent and its atomic best_encoder must match the
 * legacy connector->encoder pointer. Called with @crtc == NULL from the
 * "disabled" pass to check connectors that have no crtc.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		/*
		 * NOTE(review): when crtc == NULL this compares against
		 * &crtc->base, which presumably evaluates to NULL (base
		 * appears to be the first member) so only connectors with
		 * no crtc match — confirm the struct layout guarantees it.
		 */
		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
9284
/*
 * Verify each encoder's software enable/crtc tracking against the
 * connector states in @state and against the encoder's reported
 * hardware state. Encoders not referenced by any connector in the
 * commit are left unchecked.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector (old or new state) referenced this
		 * encoder; enabled: a connector still uses it going forward.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Skip encoders this commit never touched. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* A detached encoder must also be off in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
9333
/*
 * Read the full pipe config back from hardware and compare it against
 * the software state just committed for @crtc. The old crtc state's
 * storage is recycled to hold the hardware readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/*
	 * Reuse the old state's memory for the hw readout: destroy its
	 * contents and reset it, but preserve the uapi.state backpointer.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Bigjoiner slave pipes have their encoders on the master crtc. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's view of the config into the readout. */
		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* Nothing more to compare for an inactive pipe. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
9405
9406 static void
9407 intel_verify_planes(struct intel_atomic_state *state)
9408 {
9409         struct intel_plane *plane;
9410         const struct intel_plane_state *plane_state;
9411         int i;
9412
9413         for_each_new_intel_plane_in_state(state, plane,
9414                                           plane_state, i)
9415                 assert_plane(plane, plane_state->planar_slave ||
9416                              plane_state->uapi.visible);
9417 }
9418
/*
 * Compare one shared DPLL's software tracking (on/off state, pipe user
 * masks, cached hw state) against the actual hardware. With @crtc ==
 * NULL only the pll's global consistency is checked; otherwise @crtc's
 * membership in the pll's active/reference masks is verified as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful sw on/off state to check. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Active users must be a subset of the reference holders. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active crtc must be in the active mask, an inactive one not. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	/* Either way the crtc must hold a reference on the pll. */
	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* The cached hw state must match what is actually programmed. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
9473
9474 static void
9475 verify_shared_dpll_state(struct intel_crtc *crtc,
9476                          struct intel_crtc_state *old_crtc_state,
9477                          struct intel_crtc_state *new_crtc_state)
9478 {
9479         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9480
9481         if (new_crtc_state->shared_dpll)
9482                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
9483
9484         if (old_crtc_state->shared_dpll &&
9485             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
9486                 u8 pipe_mask = BIT(crtc->pipe);
9487                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
9488
9489                 I915_STATE_WARN(pll->active_mask & pipe_mask,
9490                                 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
9491                                 pipe_name(crtc->pipe), pll->active_mask);
9492                 I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
9493                                 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
9494                                 pipe_name(crtc->pipe), pll->state.pipe_mask);
9495         }
9496 }
9497
9498 static void
9499 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9500                           struct intel_atomic_state *state,
9501                           struct intel_crtc_state *old_crtc_state,
9502                           struct intel_crtc_state *new_crtc_state)
9503 {
9504         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9505                 return;
9506
9507         verify_wm_state(crtc, new_crtc_state);
9508         verify_connector_state(state, crtc);
9509         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9510         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9511 }
9512
9513 static void
9514 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9515 {
9516         int i;
9517
9518         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9519                 verify_single_dpll_state(dev_priv,
9520                                          &dev_priv->dpll.shared_dplls[i],
9521                                          NULL, NULL);
9522 }
9523
/*
 * State checks that don't hang off any particular crtc: encoder
 * bookkeeping, connectors with no crtc, and plls with no remaining
 * users.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
9532
/*
 * (Re)derive the vblank timestamping constants and scanline counter
 * offset for the crtc from its committed timings. With VRR enabled the
 * vertical timings are first stretched to the vmax-based values before
 * being handed to drm_calc_timestamping_constants().
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Local copy so the VRR adjustments don't modify the crtc state. */
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		/* Interlaced modes halve the effective vtotal. */
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
9594
9595 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9596 {
9597         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9598         struct intel_crtc_state *new_crtc_state;
9599         struct intel_crtc *crtc;
9600         int i;
9601
9602         if (!dev_priv->display.crtc_compute_clock)
9603                 return;
9604
9605         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9606                 if (!intel_crtc_needs_modeset(new_crtc_state))
9607                         continue;
9608
9609                 intel_release_shared_dplls(state, crtc);
9610         }
9611 }
9612
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success or a negative error code if pulling a crtc state
 * into the atomic state fails.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being turned on. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Pulls the crtc into the atomic state (may allocate). */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		/* Only already-enabled, untouched crtcs count here. */
		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one pipe already enabled: the first new pipe waits on
	 * it. None enabled but two coming up: the second waits on the
	 * first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
9673
9674 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9675                            u8 active_pipes)
9676 {
9677         const struct intel_crtc_state *crtc_state;
9678         struct intel_crtc *crtc;
9679         int i;
9680
9681         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9682                 if (crtc_state->hw.active)
9683                         active_pipes |= BIT(crtc->pipe);
9684                 else
9685                         active_pipes &= ~BIT(crtc->pipe);
9686         }
9687
9688         return active_pipes;
9689 }
9690
9691 static int intel_modeset_checks(struct intel_atomic_state *state)
9692 {
9693         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9694
9695         state->modeset = true;
9696
9697         if (IS_HASWELL(dev_priv))
9698                 return hsw_mode_set_planes_workaround(state);
9699
9700         return 0;
9701 }
9702
9703 /*
9704  * Handle calculation of various watermark data at the end of the atomic check
9705  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9706  * handlers to ensure that all derived state has been updated.
9707  */
9708 static int calc_watermark_data(struct intel_atomic_state *state)
9709 {
9710         struct drm_device *dev = state->base.dev;
9711         struct drm_i915_private *dev_priv = to_i915(dev);
9712
9713         /* Is there platform-specific watermark information to calculate? */
9714         if (dev_priv->display.compute_global_watermarks)
9715                 return dev_priv->display.compute_global_watermarks(state);
9716
9717         return 0;
9718 }
9719
9720 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9721                                      struct intel_crtc_state *new_crtc_state)
9722 {
9723         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9724                 return;
9725
9726         new_crtc_state->uapi.mode_changed = false;
9727         new_crtc_state->update_pipe = true;
9728 }
9729
9730 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9731                                     struct intel_crtc_state *new_crtc_state)
9732 {
9733         /*
9734          * If we're not doing the full modeset we want to
9735          * keep the current M/N values as they may be
9736          * sufficiently different to the computed values
9737          * to cause problems.
9738          *
9739          * FIXME: should really copy more fuzzy state here
9740          */
9741         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9742         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9743         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9744         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9745 }
9746
9747 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9748                                           struct intel_crtc *crtc,
9749                                           u8 plane_ids_mask)
9750 {
9751         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9752         struct intel_plane *plane;
9753
9754         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9755                 struct intel_plane_state *plane_state;
9756
9757                 if ((plane_ids_mask & BIT(plane->id)) == 0)
9758                         continue;
9759
9760                 plane_state = intel_atomic_get_plane_state(state, plane);
9761                 if (IS_ERR(plane_state))
9762                         return PTR_ERR(plane_state);
9763         }
9764
9765         return 0;
9766 }
9767
9768 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9769                                      struct intel_crtc *crtc)
9770 {
9771         const struct intel_crtc_state *old_crtc_state =
9772                 intel_atomic_get_old_crtc_state(state, crtc);
9773         const struct intel_crtc_state *new_crtc_state =
9774                 intel_atomic_get_new_crtc_state(state, crtc);
9775
9776         return intel_crtc_add_planes_to_state(state, crtc,
9777                                               old_crtc_state->enabled_planes |
9778                                               new_crtc_state->enabled_planes);
9779 }
9780
9781 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9782 {
9783         /* See {hsw,vlv,ivb}_plane_ratio() */
9784         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9785                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9786                 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
9787 }
9788
9789 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9790                                            struct intel_crtc *crtc,
9791                                            struct intel_crtc *other)
9792 {
9793         const struct intel_plane_state *plane_state;
9794         struct intel_plane *plane;
9795         u8 plane_ids = 0;
9796         int i;
9797
9798         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9799                 if (plane->pipe == crtc->pipe)
9800                         plane_ids |= BIT(plane->id);
9801         }
9802
9803         return intel_crtc_add_planes_to_state(state, other, plane_ids);
9804 }
9805
9806 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9807 {
9808         const struct intel_crtc_state *crtc_state;
9809         struct intel_crtc *crtc;
9810         int i;
9811
9812         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9813                 int ret;
9814
9815                 if (!crtc_state->bigjoiner)
9816                         continue;
9817
9818                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9819                                                       crtc_state->bigjoiner_linked_crtc);
9820                 if (ret)
9821                         return ret;
9822         }
9823
9824         return 0;
9825 }
9826
/*
 * Run the per-plane atomic checks and make sure all planes whose state
 * matters for the rest of the check phase are part of the atomic state:
 * icl linked (NV12 Y/UV) planes, bigjoiner mirrored planes, and - on
 * platforms where the number of active planes affects the planes' min
 * cdclk - any plane on a crtc whose active plane set changed.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	/* Per-plane driver checks for every plane already in the state. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane does not count towards the plane ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
9889
/*
 * Determine whether a full cdclk recomputation is needed.
 *
 * Computes each plane's minimum cdclk, checks whether the forced minimum
 * cdclk changed, and compares the bandwidth-derived minimum cdclk against
 * the per-pipe minimums. Sets *@need_cdclk_calc to true (never clears it)
 * when any of those require intel_modeset_calc_cdclk() to run.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* A change of the forced minimum also requires a recomputation. */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Without both cdclk and bw state there is nothing left to compare. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
9942
9943 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9944 {
9945         struct intel_crtc_state *crtc_state;
9946         struct intel_crtc *crtc;
9947         int i;
9948
9949         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9950                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9951                 int ret;
9952
9953                 ret = intel_crtc_atomic_check(state, crtc);
9954                 if (ret) {
9955                         drm_dbg_atomic(&i915->drm,
9956                                        "[CRTC:%d:%s] atomic driver check failed\n",
9957                                        crtc->base.base.id, crtc->base.name);
9958                         return ret;
9959                 }
9960         }
9961
9962         return 0;
9963 }
9964
9965 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9966                                                u8 transcoders)
9967 {
9968         const struct intel_crtc_state *new_crtc_state;
9969         struct intel_crtc *crtc;
9970         int i;
9971
9972         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9973                 if (new_crtc_state->hw.enable &&
9974                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9975                     intel_crtc_needs_modeset(new_crtc_state))
9976                         return true;
9977         }
9978
9979         return false;
9980 }
9981
/*
 * Validate and establish the bigjoiner master/slave crtc link.
 *
 * Checks that a crtc being re-enabled as a former bigjoiner slave is
 * still claimed by its master, and - when the new state wants bigjoiner -
 * that the next consecutive pipe exists and is free to be used as the
 * slave. On success the slave crtc state is populated from the master's.
 *
 * Returns 0 on success, -EINVAL on conflicting configuration, or another
 * negative error code.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/* slave being enabled: is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* bigjoiner always pairs a pipe with the immediately following one */
	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	slave = new_crtc_state->bigjoiner_linked_crtc =
		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}
10033
10034 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
10035                                  struct intel_crtc_state *master_crtc_state)
10036 {
10037         struct intel_crtc_state *slave_crtc_state =
10038                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
10039
10040         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
10041         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
10042         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
10043         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
10044 }
10045
10046 /**
10047  * DOC: asynchronous flip implementation
10048  *
10049  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
10050  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
10051  * Correspondingly, support is currently added for primary plane only.
10052  *
10053  * Async flip can only change the plane surface address, so anything else
10054  * changing is rejected from the intel_atomic_check_async() function.
10055  * Once this check is cleared, flip done interrupt is enabled using
10056  * the intel_crtc_enable_flip_done() function.
10057  *
10058  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
10060  * handler itself. The timestamp and sequence sent during the flip done event
10061  * correspond to the last vblank and have no relation to the actual time when
10062  * the flip done event was sent.
10063  */
/*
 * Validate that the atomic state qualifies for an async flip: no modeset,
 * the crtc stays active with an unchanged plane set, and the only plane
 * change is the surface address (stride/modifier/format/rotation/geometry
 * and color state must all stay identical).
 *
 * Returns 0 when the async flip is allowed, -EINVAL otherwise.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/* Crtc-level constraints: no modeset, active, stable plane set. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	/* Plane-level constraints: only the surface address may change. */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 and gen10 platforms to enable
		 * this selectively if required.
		 */
		/* NOTE(review): hw.fb is assumed non-NULL here; the plane is
		 * active per the crtc checks above - confirm for all paths. */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->color_plane[0].stride !=
		    new_plane_state->color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
10178
/*
 * Pull the linked crtc of every bigjoiner pair into the atomic state,
 * flag it for a modeset when its master/slave is doing one, and add its
 * affected connectors and planes. Afterwards tear down any existing
 * bigjoiner link on masters undergoing a modeset; the link may be
 * re-established later in the check phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* Always add the linked crtc to the state... */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		/* ...but only propagate the modeset when one is needed. */
		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
10222
10223 /**
10224  * intel_atomic_check - validate state object
10225  * @dev: drm device
10226  * @_state: state to validate
10227  */
10228 static int intel_atomic_check(struct drm_device *dev,
10229                               struct drm_atomic_state *_state)
10230 {
10231         struct drm_i915_private *dev_priv = to_i915(dev);
10232         struct intel_atomic_state *state = to_intel_atomic_state(_state);
10233         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10234         struct intel_crtc *crtc;
10235         int ret, i;
10236         bool any_ms = false;
10237
10238         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10239                                             new_crtc_state, i) {
10240                 if (new_crtc_state->inherited != old_crtc_state->inherited)
10241                         new_crtc_state->uapi.mode_changed = true;
10242         }
10243
10244         intel_vrr_check_modeset(state);
10245
10246         ret = drm_atomic_helper_check_modeset(dev, &state->base);
10247         if (ret)
10248                 goto fail;
10249
10250         ret = intel_bigjoiner_add_affected_crtcs(state);
10251         if (ret)
10252                 goto fail;
10253
10254         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10255                                             new_crtc_state, i) {
10256                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
10257                         /* Light copy */
10258                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
10259
10260                         continue;
10261                 }
10262
10263                 if (!new_crtc_state->uapi.enable) {
10264                         if (!new_crtc_state->bigjoiner_slave) {
10265                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
10266                                 any_ms = true;
10267                         }
10268                         continue;
10269                 }
10270
10271                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
10272                 if (ret)
10273                         goto fail;
10274
10275                 ret = intel_modeset_pipe_config(state, new_crtc_state);
10276                 if (ret)
10277                         goto fail;
10278
10279                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
10280                                                    new_crtc_state);
10281                 if (ret)
10282                         goto fail;
10283         }
10284
10285         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10286                                             new_crtc_state, i) {
10287                 if (!intel_crtc_needs_modeset(new_crtc_state))
10288                         continue;
10289
10290                 ret = intel_modeset_pipe_config_late(new_crtc_state);
10291                 if (ret)
10292                         goto fail;
10293
10294                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
10295         }
10296
10297         /**
10298          * Check if fastset is allowed by external dependencies like other
10299          * pipes and transcoders.
10300          *
10301          * Right now it only forces a fullmodeset when the MST master
10302          * transcoder did not changed but the pipe of the master transcoder
10303          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
10304          * in case of port synced crtcs, if one of the synced crtcs
10305          * needs a full modeset, all other synced crtcs should be
10306          * forced a full modeset.
10307          */
10308         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10309                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
10310                         continue;
10311
10312                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
10313                         enum transcoder master = new_crtc_state->mst_master_transcoder;
10314
10315                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
10316                                 new_crtc_state->uapi.mode_changed = true;
10317                                 new_crtc_state->update_pipe = false;
10318                         }
10319                 }
10320
10321                 if (is_trans_port_sync_mode(new_crtc_state)) {
10322                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
10323
10324                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
10325                                 trans |= BIT(new_crtc_state->master_transcoder);
10326
10327                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
10328                                 new_crtc_state->uapi.mode_changed = true;
10329                                 new_crtc_state->update_pipe = false;
10330                         }
10331                 }
10332
10333                 if (new_crtc_state->bigjoiner) {
10334                         struct intel_crtc_state *linked_crtc_state =
10335                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
10336
10337                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
10338                                 new_crtc_state->uapi.mode_changed = true;
10339                                 new_crtc_state->update_pipe = false;
10340                         }
10341                 }
10342         }
10343
10344         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10345                                             new_crtc_state, i) {
10346                 if (intel_crtc_needs_modeset(new_crtc_state)) {
10347                         any_ms = true;
10348                         continue;
10349                 }
10350
10351                 if (!new_crtc_state->update_pipe)
10352                         continue;
10353
10354                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
10355         }
10356
10357         if (any_ms && !check_digital_port_conflicts(state)) {
10358                 drm_dbg_kms(&dev_priv->drm,
10359                             "rejecting conflicting digital port configuration\n");
10360                 ret = -EINVAL;
10361                 goto fail;
10362         }
10363
10364         ret = drm_dp_mst_atomic_check(&state->base);
10365         if (ret)
10366                 goto fail;
10367
10368         ret = intel_atomic_check_planes(state);
10369         if (ret)
10370                 goto fail;
10371
10372         intel_fbc_choose_crtc(dev_priv, state);
10373         ret = calc_watermark_data(state);
10374         if (ret)
10375                 goto fail;
10376
10377         ret = intel_bw_atomic_check(state);
10378         if (ret)
10379                 goto fail;
10380
10381         ret = intel_atomic_check_cdclk(state, &any_ms);
10382         if (ret)
10383                 goto fail;
10384
10385         if (any_ms) {
10386                 ret = intel_modeset_checks(state);
10387                 if (ret)
10388                         goto fail;
10389
10390                 ret = intel_modeset_calc_cdclk(state);
10391                 if (ret)
10392                         return ret;
10393
10394                 intel_modeset_clear_plls(state);
10395         }
10396
10397         ret = intel_atomic_check_crtcs(state);
10398         if (ret)
10399                 goto fail;
10400
10401         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10402                                             new_crtc_state, i) {
10403                 if (new_crtc_state->uapi.async_flip) {
10404                         ret = intel_atomic_check_async(state);
10405                         if (ret)
10406                                 goto fail;
10407                 }
10408
10409                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
10410                     !new_crtc_state->update_pipe)
10411                         continue;
10412
10413                 intel_dump_pipe_config(new_crtc_state, state,
10414                                        intel_crtc_needs_modeset(new_crtc_state) ?
10415                                        "[modeset]" : "[fastset]");
10416         }
10417
10418         return 0;
10419
10420  fail:
10421         if (ret == -EDEADLK)
10422                 return ret;
10423
10424         /*
10425          * FIXME would probably be nice to know which crtc specifically
10426          * caused the failure, in cases where we can pinpoint it.
10427          */
10428         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10429                                             new_crtc_state, i)
10430                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
10431
10432         return ret;
10433 }
10434
10435 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10436 {
10437         struct intel_crtc_state *crtc_state;
10438         struct intel_crtc *crtc;
10439         int i, ret;
10440
10441         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10442         if (ret < 0)
10443                 return ret;
10444
10445         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10446                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10447
10448                 if (mode_changed || crtc_state->update_pipe ||
10449                     crtc_state->uapi.color_mgmt_changed) {
10450                         intel_dsb_prepare(crtc_state);
10451                 }
10452         }
10453
10454         return 0;
10455 }
10456
10457 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10458                                   struct intel_crtc_state *crtc_state)
10459 {
10460         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10461
10462         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
10463                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10464
10465         if (crtc_state->has_pch_encoder) {
10466                 enum pipe pch_transcoder =
10467                         intel_crtc_pch_transcoder(crtc);
10468
10469                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10470         }
10471 }
10472
/*
 * Program the pipe state that is allowed to change during a fastset
 * (i.e. without a full modeset): pipe source size, panel fitter,
 * linetime watermarks and (icl+) the pipe chicken register.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ilk-style pfit: enable, or disable if it was on before. */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
10517
/*
 * Commit the pipe-level configuration for one CRTC. For full modesets
 * the pipe was already programmed when the CRTC was enabled, so only
 * the fastset/color-management pieces are written here; watermarks are
 * updated in both cases.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	/* Platform hook; not all platforms have per-commit watermark updates. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
10552
10553 static void intel_enable_crtc(struct intel_atomic_state *state,
10554                               struct intel_crtc *crtc)
10555 {
10556         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10557         const struct intel_crtc_state *new_crtc_state =
10558                 intel_atomic_get_new_crtc_state(state, crtc);
10559
10560         if (!intel_crtc_needs_modeset(new_crtc_state))
10561                 return;
10562
10563         intel_crtc_update_active_timings(new_crtc_state);
10564
10565         dev_priv->display.crtc_enable(state, crtc);
10566
10567         if (new_crtc_state->bigjoiner_slave)
10568                 return;
10569
10570         /* vblanks work again, re-enable pipe CRC. */
10571         intel_crtc_enable_pipe_crc(crtc);
10572 }
10573
/*
 * Commit the plane and pipe updates for one CRTC. The register writes
 * between intel_pipe_update_start() and intel_pipe_update_end() are
 * done inside the vblank evasion critical section so they land in a
 * single frame.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* Fastset-only preparation; modesets did this during enable. */
	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
10623
/*
 * Fully disable a CRTC (planes, pipe CRC, pipe, FBC, shared DPLL).
 * For a bigjoiner master this also disables the slave's planes; the
 * function must never be called directly on a bigjoiner slave.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
10663
/*
 * Disable every CRTC that needs a full modeset. Transcoder port sync
 * and MST slaves are disabled in a first pass (they must go down
 * before their masters), everything else in a second pass; bigjoiner
 * slaves are never disabled directly, their master's disable path
 * covers them.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0; /* bitmask of pipes disabled in the first pass */
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			/* The slave needs its pre-plane work done too. */
			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
10716
10717 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10718 {
10719         struct intel_crtc_state *new_crtc_state;
10720         struct intel_crtc *crtc;
10721         int i;
10722
10723         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10724                 if (!new_crtc_state->hw.active)
10725                         continue;
10726
10727                 intel_enable_crtc(state, crtc);
10728                 intel_update_crtc(state, crtc);
10729         }
10730 }
10731
/*
 * skl+ enable/update path. Pipes must be updated in an order that keeps
 * their DDB allocations from ever overlapping between two consecutive
 * frames, so fastset pipes are updated first (waiting for vblanks where
 * needed), then modeset pipes are enabled respecting MST/port-sync/
 * bigjoiner master-slave dependencies, and finally the newly enabled
 * pipes get their plane updates.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	/* DDB entries currently claimed, per pipe */
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer pipes whose new DDB still overlaps another pipe. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* Modeset pipes still need their plane update after enabling. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Skip pipes that must wait for another pipe (handled below). */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe should have been handled by one of the passes above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
10853
10854 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10855 {
10856         struct intel_atomic_state *state, *next;
10857         struct llist_node *freed;
10858
10859         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10860         llist_for_each_entry_safe(state, next, freed, freed)
10861                 drm_atomic_state_put(&state->base);
10862 }
10863
10864 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10865 {
10866         struct drm_i915_private *dev_priv =
10867                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10868
10869         intel_atomic_helper_free_state(dev_priv);
10870 }
10871
/*
 * Wait until the commit's sw fence is signalled, or until a GPU reset
 * that needs the modeset state (I915_RESET_MODESET) is flagged —
 * whichever comes first — so the commit does not block a pending
 * reset indefinitely.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both waitqueues before re-checking the conditions. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
10898
/*
 * Free the DSBs of all CRTCs in the commit. Runs from the deferred
 * cleanup work; commit_tail moved each dsb pointer from the new to
 * the old crtc state, so the old state owns it by now.
 */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}
10909
10910 static void intel_atomic_cleanup_work(struct work_struct *work)
10911 {
10912         struct intel_atomic_state *state =
10913                 container_of(work, struct intel_atomic_state, base.commit_work);
10914         struct drm_i915_private *i915 = to_i915(state->base.dev);
10915
10916         intel_cleanup_dsbs(state);
10917         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10918         drm_atomic_helper_commit_cleanup_done(&state->base);
10919         drm_atomic_state_put(&state->base);
10920
10921         intel_atomic_helper_free_state(i915);
10922 }
10923
/*
 * For every plane using a gen12 RC CCS clear-color framebuffer, read
 * the GPU-written native clear color value out of the FB's clear color
 * plane into the plane state, so it can be programmed during the
 * commit.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int ret;

		if (!fb ||
		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4 x 4-byte per-channel values described above. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[2] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
10960
10961 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10962 {
10963         struct drm_device *dev = state->base.dev;
10964         struct drm_i915_private *dev_priv = to_i915(dev);
10965         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10966         struct intel_crtc *crtc;
10967         u64 put_domains[I915_MAX_PIPES] = {};
10968         intel_wakeref_t wakeref = 0;
10969         int i;
10970
10971         intel_atomic_commit_fence_wait(state);
10972
10973         drm_atomic_helper_wait_for_dependencies(&state->base);
10974
10975         if (state->modeset)
10976                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10977
10978         intel_atomic_prepare_plane_clear_colors(state);
10979
10980         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10981                                             new_crtc_state, i) {
10982                 if (intel_crtc_needs_modeset(new_crtc_state) ||
10983                     new_crtc_state->update_pipe) {
10984
10985                         put_domains[crtc->pipe] =
10986                                 modeset_get_crtc_power_domains(new_crtc_state);
10987                 }
10988         }
10989
10990         intel_commit_modeset_disables(state);
10991
10992         /* FIXME: Eventually get rid of our crtc->config pointer */
10993         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10994                 crtc->config = new_crtc_state;
10995
10996         if (state->modeset) {
10997                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10998
10999                 intel_set_cdclk_pre_plane_update(state);
11000
11001                 intel_modeset_verify_disabled(dev_priv, state);
11002         }
11003
11004         intel_sagv_pre_plane_update(state);
11005
11006         /* Complete the events for pipes that have now been disabled */
11007         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11008                 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
11009
11010                 /* Complete events for now disable pipes here. */
11011                 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
11012                         spin_lock_irq(&dev->event_lock);
11013                         drm_crtc_send_vblank_event(&crtc->base,
11014                                                    new_crtc_state->uapi.event);
11015                         spin_unlock_irq(&dev->event_lock);
11016
11017                         new_crtc_state->uapi.event = NULL;
11018                 }
11019         }
11020
11021         if (state->modeset)
11022                 intel_encoders_update_prepare(state);
11023
11024         intel_dbuf_pre_plane_update(state);
11025
11026         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11027                 if (new_crtc_state->uapi.async_flip)
11028                         intel_crtc_enable_flip_done(state, crtc);
11029         }
11030
11031         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
11032         dev_priv->display.commit_modeset_enables(state);
11033
11034         if (state->modeset) {
11035                 intel_encoders_update_complete(state);
11036
11037                 intel_set_cdclk_post_plane_update(state);
11038         }
11039
11040         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
11041          * already, but still need the state for the delayed optimization. To
11042          * fix this:
11043          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
11044          * - schedule that vblank worker _before_ calling hw_done
11045          * - at the start of commit_tail, cancel it _synchrously
11046          * - switch over to the vblank wait helper in the core after that since
11047          *   we don't need out special handling any more.
11048          */
11049         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
11050
11051         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11052                 if (new_crtc_state->uapi.async_flip)
11053                         intel_crtc_disable_flip_done(state, crtc);
11054
11055                 if (new_crtc_state->hw.active &&
11056                     !intel_crtc_needs_modeset(new_crtc_state) &&
11057                     !new_crtc_state->preload_luts &&
11058                     (new_crtc_state->uapi.color_mgmt_changed ||
11059                      new_crtc_state->update_pipe))
11060                         intel_color_load_luts(new_crtc_state);
11061         }
11062
11063         /*
11064          * Now that the vblank has passed, we can go ahead and program the
11065          * optimal watermarks on platforms that need two-step watermark
11066          * programming.
11067          *
11068          * TODO: Move this (and other cleanup) to an async worker eventually.
11069          */
11070         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11071                                             new_crtc_state, i) {
11072                 /*
11073                  * Gen2 reports pipe underruns whenever all planes are disabled.
11074                  * So re-enable underrun reporting after some planes get enabled.
11075                  *
11076                  * We do this before .optimize_watermarks() so that we have a
11077                  * chance of catching underruns with the intermediate watermarks
11078                  * vs. the new plane configuration.
11079                  */
11080                 if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
11081                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
11082
11083                 if (dev_priv->display.optimize_watermarks)
11084                         dev_priv->display.optimize_watermarks(state, crtc);
11085         }
11086
11087         intel_dbuf_post_plane_update(state);
11088
11089         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11090                 intel_post_plane_update(state, crtc);
11091
11092                 modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
11093
11094                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
11095
11096                 /*
11097                  * DSB cleanup is done in cleanup_work aligning with framebuffer
11098                  * cleanup. So copy and reset the dsb structure to sync with
11099                  * commit_done and later do dsb cleanup in cleanup_work.
11100                  */
11101                 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
11102         }
11103
11104         /* Underruns don't always raise interrupts, so check manually */
11105         intel_check_cpu_fifo_underruns(dev_priv);
11106         intel_check_pch_fifo_underruns(dev_priv);
11107
11108         if (state->modeset)
11109                 intel_verify_planes(state);
11110
11111         intel_sagv_post_plane_update(state);
11112
11113         drm_atomic_helper_commit_hw_done(&state->base);
11114
11115         if (state->modeset) {
11116                 /* As one of the primary mmio accessors, KMS has a high
11117                  * likelihood of triggering bugs in unclaimed access. After we
11118                  * finish modesetting, see if an error has been flagged, and if
11119                  * so enable debugging for the next modeset - and hope we catch
11120                  * the culprit.
11121                  */
11122                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
11123                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
11124         }
11125         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
11126
11127         /*
11128          * Defer the cleanup of the old state to a separate worker to not
11129          * impede the current task (userspace for blocking modesets) that
11130          * are executed inline. For out-of-line asynchronous modesets/flips,
11131          * deferring to a new worker seems overkill, but we would place a
11132          * schedule point (cond_resched()) here anyway to keep latencies
11133          * down.
11134          */
11135         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
11136         queue_work(system_highpri_wq, &state->base.commit_work);
11137 }
11138
11139 static void intel_atomic_commit_work(struct work_struct *work)
11140 {
11141         struct intel_atomic_state *state =
11142                 container_of(work, struct intel_atomic_state, base.commit_work);
11143
11144         intel_atomic_commit_tail(state);
11145 }
11146
11147 static int __i915_sw_fence_call
11148 intel_atomic_commit_ready(struct i915_sw_fence *fence,
11149                           enum i915_sw_fence_notify notify)
11150 {
11151         struct intel_atomic_state *state =
11152                 container_of(fence, struct intel_atomic_state, commit_ready);
11153
11154         switch (notify) {
11155         case FENCE_COMPLETE:
11156                 /* we do blocking waits in the worker, nothing to do here */
11157                 break;
11158         case FENCE_FREE:
11159                 {
11160                         struct intel_atomic_helper *helper =
11161                                 &to_i915(state->base.dev)->atomic_helper;
11162
11163                         if (llist_add(&state->freed, &helper->free_list))
11164                                 schedule_work(&helper->free_work);
11165                         break;
11166                 }
11167         }
11168
11169         return NOTIFY_DONE;
11170 }
11171
11172 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
11173 {
11174         struct intel_plane_state *old_plane_state, *new_plane_state;
11175         struct intel_plane *plane;
11176         int i;
11177
11178         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
11179                                              new_plane_state, i)
11180                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
11181                                         to_intel_frontbuffer(new_plane_state->hw.fb),
11182                                         plane->frontbuffer_bit);
11183 }
11184
/*
 * Main atomic commit entry point for i915.
 *
 * Prepares the commit (pinning fbs, setting up fence dependencies on
 * state->commit_ready), swaps in the new state and then either queues the
 * commit tail on a workqueue (nonblocking) or runs it inline (blocking).
 * Returns 0 on success or a negative error code; on error all resources
 * acquired here (wakeref, DSBs, plane state) are released again.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/*
	 * Keep the device awake for the duration of the commit; this
	 * wakeref is released in intel_atomic_commit_tail() or on the
	 * error paths below.
	 */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Complete the commit_ready fence before bailing out. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		/* Release the per-crtc DSBs allocated for this commit. */
		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Second state reference for the commit tail / cleanup worker. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/*
		 * A blocking modeset must not overtake nonblocking
		 * modesets that are still in flight on the workqueue.
		 */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
11275
/*
 * Bookkeeping for a one-shot RPS boost armed on a crtc's vblank
 * waitqueue; allocated in add_rps_boost_after_vblank() and freed by
 * do_rps_boost().
 */
struct wait_rps_boost {
	/* Entry hooked onto the crtc's vblank waitqueue. */
	struct wait_queue_entry wait;

	/* Crtc whose vblank reference we hold until the boost fires. */
	struct drm_crtc *crtc;
	/* Request to boost; reference dropped in do_rps_boost(). */
	struct i915_request *request;
};
11282
11283 static int do_rps_boost(struct wait_queue_entry *_wait,
11284                         unsigned mode, int sync, void *key)
11285 {
11286         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
11287         struct i915_request *rq = wait->request;
11288
11289         /*
11290          * If we missed the vblank, but the request is already running it
11291          * is reasonable to assume that it will complete before the next
11292          * vblank without our intervention, so leave RPS alone.
11293          */
11294         if (!i915_request_started(rq))
11295                 intel_rps_boost(rq);
11296         i915_request_put(rq);
11297
11298         drm_crtc_vblank_put(wait->crtc);
11299
11300         list_del(&wait->wait.entry);
11301         kfree(wait);
11302         return 1;
11303 }
11304
11305 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
11306                                        struct dma_fence *fence)
11307 {
11308         struct wait_rps_boost *wait;
11309
11310         if (!dma_fence_is_i915(fence))
11311                 return;
11312
11313         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
11314                 return;
11315
11316         if (drm_crtc_vblank_get(crtc))
11317                 return;
11318
11319         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
11320         if (!wait) {
11321                 drm_crtc_vblank_put(crtc);
11322                 return;
11323         }
11324
11325         wait->request = to_request(dma_fence_get(fence));
11326         wait->crtc = crtc;
11327
11328         wait->wait.func = do_rps_boost;
11329         wait->wait.flags = 0;
11330
11331         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
11332 }
11333
11334 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
11335 {
11336         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11337         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11338         struct drm_framebuffer *fb = plane_state->hw.fb;
11339         struct i915_vma *vma;
11340
11341         if (plane->id == PLANE_CURSOR &&
11342             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
11343                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11344                 const int align = intel_cursor_alignment(dev_priv);
11345                 int err;
11346
11347                 err = i915_gem_object_attach_phys(obj, align);
11348                 if (err)
11349                         return err;
11350         }
11351
11352         vma = intel_pin_and_fence_fb_obj(fb,
11353                                          &plane_state->view,
11354                                          intel_plane_uses_fence(plane_state),
11355                                          &plane_state->flags);
11356         if (IS_ERR(vma))
11357                 return PTR_ERR(vma);
11358
11359         plane_state->vma = vma;
11360
11361         return 0;
11362 }
11363
11364 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
11365 {
11366         struct i915_vma *vma;
11367
11368         vma = fetch_and_zero(&old_plane_state->vma);
11369         if (vma)
11370                 intel_unpin_fb_vma(vma, old_plane_state->flags);
11371 }
11372
11373 /**
11374  * intel_prepare_plane_fb - Prepare fb for usage on plane
11375  * @_plane: drm plane to prepare for
11376  * @_new_plane_state: the plane state being prepared
11377  *
11378  * Prepares a framebuffer for usage on a display plane.  Generally this
11379  * involves pinning the underlying object and updating the frontbuffer tracking
11380  * bits.  Some older platforms need special physical address handling for
11381  * cursor planes.
11382  *
11383  * Returns 0 on success, negative error code on failure.
11384  */
11385 int
11386 intel_prepare_plane_fb(struct drm_plane *_plane,
11387                        struct drm_plane_state *_new_plane_state)
11388 {
11389         struct i915_sched_attr attr = {
11390                 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
11391         };
11392         struct intel_plane *plane = to_intel_plane(_plane);
11393         struct intel_plane_state *new_plane_state =
11394                 to_intel_plane_state(_new_plane_state);
11395         struct intel_atomic_state *state =
11396                 to_intel_atomic_state(new_plane_state->uapi.state);
11397         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11398         const struct intel_plane_state *old_plane_state =
11399                 intel_atomic_get_old_plane_state(state, plane);
11400         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
11401         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
11402         int ret;
11403
11404         if (old_obj) {
11405                 const struct intel_crtc_state *crtc_state =
11406                         intel_atomic_get_new_crtc_state(state,
11407                                                         to_intel_crtc(old_plane_state->hw.crtc));
11408
11409                 /* Big Hammer, we also need to ensure that any pending
11410                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
11411                  * current scanout is retired before unpinning the old
11412                  * framebuffer. Note that we rely on userspace rendering
11413                  * into the buffer attached to the pipe they are waiting
11414                  * on. If not, userspace generates a GPU hang with IPEHR
11415                  * point to the MI_WAIT_FOR_EVENT.
11416                  *
11417                  * This should only fail upon a hung GPU, in which case we
11418                  * can safely continue.
11419                  */
11420                 if (intel_crtc_needs_modeset(crtc_state)) {
11421                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
11422                                                               old_obj->base.resv, NULL,
11423                                                               false, 0,
11424                                                               GFP_KERNEL);
11425                         if (ret < 0)
11426                                 return ret;
11427                 }
11428         }
11429
11430         if (new_plane_state->uapi.fence) { /* explicit fencing */
11431                 i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
11432                                              &attr);
11433                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11434                                                     new_plane_state->uapi.fence,
11435                                                     i915_fence_timeout(dev_priv),
11436                                                     GFP_KERNEL);
11437                 if (ret < 0)
11438                         return ret;
11439         }
11440
11441         if (!obj)
11442                 return 0;
11443
11444         ret = i915_gem_object_pin_pages(obj);
11445         if (ret)
11446                 return ret;
11447
11448         ret = intel_plane_pin_fb(new_plane_state);
11449
11450         i915_gem_object_unpin_pages(obj);
11451         if (ret)
11452                 return ret;
11453
11454         i915_gem_object_wait_priority(obj, 0, &attr);
11455         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11456
11457         if (!new_plane_state->uapi.fence) { /* implicit fencing */
11458                 struct dma_fence *fence;
11459
11460                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
11461                                                       obj->base.resv, NULL,
11462                                                       false,
11463                                                       i915_fence_timeout(dev_priv),
11464                                                       GFP_KERNEL);
11465                 if (ret < 0)
11466                         goto unpin_fb;
11467
11468                 fence = dma_resv_get_excl_rcu(obj->base.resv);
11469                 if (fence) {
11470                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11471                                                    fence);
11472                         dma_fence_put(fence);
11473                 }
11474         } else {
11475                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11476                                            new_plane_state->uapi.fence);
11477         }
11478
11479         /*
11480          * We declare pageflips to be interactive and so merit a small bias
11481          * towards upclocking to deliver the frame on time. By only changing
11482          * the RPS thresholds to sample more regularly and aim for higher
11483          * clocks we can hopefully deliver low power workloads (like kodi)
11484          * that are not quite steady state without resorting to forcing
11485          * maximum clocks following a vblank miss (see do_rps_boost()).
11486          */
11487         if (!state->rps_interactive) {
11488                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11489                 state->rps_interactive = true;
11490         }
11491
11492         return 0;
11493
11494 unpin_fb:
11495         intel_plane_unpin_fb(new_plane_state);
11496
11497         return ret;
11498 }
11499
11500 /**
11501  * intel_cleanup_plane_fb - Cleans up an fb after plane use
11502  * @plane: drm plane to clean up for
11503  * @_old_plane_state: the state from the previous modeset
11504  *
11505  * Cleans up a framebuffer that has just been removed from a plane.
11506  */
11507 void
11508 intel_cleanup_plane_fb(struct drm_plane *plane,
11509                        struct drm_plane_state *_old_plane_state)
11510 {
11511         struct intel_plane_state *old_plane_state =
11512                 to_intel_plane_state(_old_plane_state);
11513         struct intel_atomic_state *state =
11514                 to_intel_atomic_state(old_plane_state->uapi.state);
11515         struct drm_i915_private *dev_priv = to_i915(plane->dev);
11516         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11517
11518         if (!obj)
11519                 return;
11520
11521         if (state->rps_interactive) {
11522                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11523                 state->rps_interactive = false;
11524         }
11525
11526         /* Should only be called after a successful intel_prepare_plane_fb()! */
11527         intel_plane_unpin_fb(old_plane_state);
11528 }
11529
11530 /**
11531  * intel_plane_destroy - destroy a plane
11532  * @plane: plane to destroy
11533  *
11534  * Common destruction function for all types of planes (primary, cursor,
11535  * sprite).
11536  */
11537 void intel_plane_destroy(struct drm_plane *plane)
11538 {
11539         drm_plane_cleanup(plane);
11540         kfree(to_intel_plane(plane));
11541 }
11542
11543 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11544 {
11545         struct intel_plane *plane;
11546
11547         for_each_intel_plane(&dev_priv->drm, plane) {
11548                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11549                                                                   plane->pipe);
11550
11551                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11552         }
11553 }
11554
11555
11556 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11557                                       struct drm_file *file)
11558 {
11559         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11560         struct drm_crtc *drmmode_crtc;
11561         struct intel_crtc *crtc;
11562
11563         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11564         if (!drmmode_crtc)
11565                 return -ENOENT;
11566
11567         crtc = to_intel_crtc(drmmode_crtc);
11568         pipe_from_crtc_id->pipe = crtc->pipe;
11569
11570         return 0;
11571 }
11572
11573 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11574 {
11575         struct drm_device *dev = encoder->base.dev;
11576         struct intel_encoder *source_encoder;
11577         u32 possible_clones = 0;
11578
11579         for_each_intel_encoder(dev, source_encoder) {
11580                 if (encoders_cloneable(encoder, source_encoder))
11581                         possible_clones |= drm_encoder_mask(&source_encoder->base);
11582         }
11583
11584         return possible_clones;
11585 }
11586
11587 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11588 {
11589         struct drm_device *dev = encoder->base.dev;
11590         struct intel_crtc *crtc;
11591         u32 possible_crtcs = 0;
11592
11593         for_each_intel_crtc(dev, crtc) {
11594                 if (encoder->pipe_mask & BIT(crtc->pipe))
11595                         possible_crtcs |= drm_crtc_mask(&crtc->base);
11596         }
11597
11598         return possible_crtcs;
11599 }
11600
11601 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11602 {
11603         if (!IS_MOBILE(dev_priv))
11604                 return false;
11605
11606         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11607                 return false;
11608
11609         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11610                 return false;
11611
11612         return true;
11613 }
11614
11615 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11616 {
11617         if (INTEL_GEN(dev_priv) >= 9)
11618                 return false;
11619
11620         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11621                 return false;
11622
11623         if (HAS_PCH_LPT_H(dev_priv) &&
11624             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11625                 return false;
11626
11627         /* DDI E can't be used if DDI A requires 4 lanes */
11628         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11629                 return false;
11630
11631         if (!dev_priv->vbt.int_crt_support)
11632                 return false;
11633
11634         return true;
11635 }
11636
11637 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
11638 {
11639         struct intel_encoder *encoder;
11640         bool dpd_is_edp = false;
11641
11642         intel_pps_unlock_regs_wa(dev_priv);
11643
11644         if (!HAS_DISPLAY(dev_priv))
11645                 return;
11646
11647         if (IS_ALDERLAKE_S(dev_priv)) {
11648                 intel_ddi_init(dev_priv, PORT_A);
11649                 intel_ddi_init(dev_priv, PORT_TC1);
11650                 intel_ddi_init(dev_priv, PORT_TC2);
11651                 intel_ddi_init(dev_priv, PORT_TC3);
11652                 intel_ddi_init(dev_priv, PORT_TC4);
11653         } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11654                 intel_ddi_init(dev_priv, PORT_A);
11655                 intel_ddi_init(dev_priv, PORT_B);
11656                 intel_ddi_init(dev_priv, PORT_TC1);
11657                 intel_ddi_init(dev_priv, PORT_TC2);
11658         } else if (INTEL_GEN(dev_priv) >= 12) {
11659                 intel_ddi_init(dev_priv, PORT_A);
11660                 intel_ddi_init(dev_priv, PORT_B);
11661                 intel_ddi_init(dev_priv, PORT_TC1);
11662                 intel_ddi_init(dev_priv, PORT_TC2);
11663                 intel_ddi_init(dev_priv, PORT_TC3);
11664                 intel_ddi_init(dev_priv, PORT_TC4);
11665                 intel_ddi_init(dev_priv, PORT_TC5);
11666                 intel_ddi_init(dev_priv, PORT_TC6);
11667                 icl_dsi_init(dev_priv);
11668         } else if (IS_JSL_EHL(dev_priv)) {
11669                 intel_ddi_init(dev_priv, PORT_A);
11670                 intel_ddi_init(dev_priv, PORT_B);
11671                 intel_ddi_init(dev_priv, PORT_C);
11672                 intel_ddi_init(dev_priv, PORT_D);
11673                 icl_dsi_init(dev_priv);
11674         } else if (IS_GEN(dev_priv, 11)) {
11675                 intel_ddi_init(dev_priv, PORT_A);
11676                 intel_ddi_init(dev_priv, PORT_B);
11677                 intel_ddi_init(dev_priv, PORT_C);
11678                 intel_ddi_init(dev_priv, PORT_D);
11679                 intel_ddi_init(dev_priv, PORT_E);
11680                 /*
11681                  * On some ICL SKUs port F is not present. No strap bits for
11682                  * this, so rely on VBT.
11683                  * Work around broken VBTs on SKUs known to have no port F.
11684                  */
11685                 if (IS_ICL_WITH_PORT_F(dev_priv) &&
11686                     intel_bios_is_port_present(dev_priv, PORT_F))
11687                         intel_ddi_init(dev_priv, PORT_F);
11688
11689                 icl_dsi_init(dev_priv);
11690         } else if (IS_GEN9_LP(dev_priv)) {
11691                 /*
11692                  * FIXME: Broxton doesn't support port detection via the
11693                  * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
11694                  * detect the ports.
11695                  */
11696                 intel_ddi_init(dev_priv, PORT_A);
11697                 intel_ddi_init(dev_priv, PORT_B);
11698                 intel_ddi_init(dev_priv, PORT_C);
11699
11700                 vlv_dsi_init(dev_priv);
11701         } else if (HAS_DDI(dev_priv)) {
11702                 int found;
11703
11704                 if (intel_ddi_crt_present(dev_priv))
11705                         intel_crt_init(dev_priv);
11706
11707                 /*
11708                  * Haswell uses DDI functions to detect digital outputs.
11709                  * On SKL pre-D0 the strap isn't connected. Later SKUs may or
11710                  * may not have it - it was supposed to be fixed by the same
11711                  * time we stopped using straps. Assume it's there.
11712                  */
11713                 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11714                 /* WaIgnoreDDIAStrap: skl */
11715                 if (found || IS_GEN9_BC(dev_priv))
11716                         intel_ddi_init(dev_priv, PORT_A);
11717
11718                 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
11719                  * register */
11720                 if (HAS_PCH_TGP(dev_priv)) {
11721                         /* W/A due to lack of STRAP config on TGP PCH*/
11722                         found = (SFUSE_STRAP_DDIB_DETECTED |
11723                                  SFUSE_STRAP_DDIC_DETECTED |
11724                                  SFUSE_STRAP_DDID_DETECTED);
11725                 } else {
11726                         found = intel_de_read(dev_priv, SFUSE_STRAP);
11727                 }
11728
11729                 if (found & SFUSE_STRAP_DDIB_DETECTED)
11730                         intel_ddi_init(dev_priv, PORT_B);
11731                 if (found & SFUSE_STRAP_DDIC_DETECTED)
11732                         intel_ddi_init(dev_priv, PORT_C);
11733                 if (found & SFUSE_STRAP_DDID_DETECTED)
11734                         intel_ddi_init(dev_priv, PORT_D);
11735                 if (found & SFUSE_STRAP_DDIF_DETECTED)
11736                         intel_ddi_init(dev_priv, PORT_F);
11737                 /*
11738                  * On SKL we don't have a way to detect DDI-E so we rely on VBT.
11739                  */
11740                 if (IS_GEN9_BC(dev_priv) &&
11741                     intel_bios_is_port_present(dev_priv, PORT_E))
11742                         intel_ddi_init(dev_priv, PORT_E);
11743
11744         } else if (HAS_PCH_SPLIT(dev_priv)) {
11745                 int found;
11746
11747                 /*
11748                  * intel_edp_init_connector() depends on this completing first,
11749                  * to prevent the registration of both eDP and LVDS and the
11750                  * incorrect sharing of the PPS.
11751                  */
11752                 intel_lvds_init(dev_priv);
11753                 intel_crt_init(dev_priv);
11754
11755                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11756
11757                 if (ilk_has_edp_a(dev_priv))
11758                         g4x_dp_init(dev_priv, DP_A, PORT_A);
11759
11760                 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11761                         /* PCH SDVOB multiplex with HDMIB */
11762                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11763                         if (!found)
11764                                 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11765                         if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11766                                 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11767                 }
11768
11769                 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11770                         g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11771
11772                 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11773                         g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11774
11775                 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11776                         g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11777
11778                 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11779                         g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11780         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11781                 bool has_edp, has_port;
11782
11783                 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11784                         intel_crt_init(dev_priv);
11785
11786                 /*
11787                  * The DP_DETECTED bit is the latched state of the DDC
11788                  * SDA pin at boot. However since eDP doesn't require DDC
11789                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
11790                  * eDP ports may have been muxed to an alternate function.
11791                  * Thus we can't rely on the DP_DETECTED bit alone to detect
11792                  * eDP ports. Consult the VBT as well as DP_DETECTED to
11793                  * detect eDP ports.
11794                  *
11795                  * Sadly the straps seem to be missing sometimes even for HDMI
11796                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
11797                  * and VBT for the presence of the port. Additionally we can't
11798                  * trust the port type the VBT declares as we've seen at least
11799                  * HDMI ports that the VBT claim are DP or eDP.
11800                  */
11801                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11802                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11803                 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11804                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11805                 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11806                         g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11807
11808                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11809                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11810                 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11811                         has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11812                 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11813                         g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11814
11815                 if (IS_CHERRYVIEW(dev_priv)) {
11816                         /*
11817                          * eDP not supported on port D,
11818                          * so no need to worry about it
11819                          */
11820                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11821                         if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11822                                 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11823                         if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11824                                 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11825                 }
11826
11827                 vlv_dsi_init(dev_priv);
11828         } else if (IS_PINEVIEW(dev_priv)) {
11829                 intel_lvds_init(dev_priv);
11830                 intel_crt_init(dev_priv);
11831         } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
11832                 bool found = false;
11833
11834                 if (IS_MOBILE(dev_priv))
11835                         intel_lvds_init(dev_priv);
11836
11837                 intel_crt_init(dev_priv);
11838
11839                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11840                         drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11841                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11842                         if (!found && IS_G4X(dev_priv)) {
11843                                 drm_dbg_kms(&dev_priv->drm,
11844                                             "probing HDMI on SDVOB\n");
11845                                 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11846                         }
11847
11848                         if (!found && IS_G4X(dev_priv))
11849                                 g4x_dp_init(dev_priv, DP_B, PORT_B);
11850                 }
11851
11852                 /* Before G4X SDVOC doesn't have its own detect register */
11853
11854                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11855                         drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11856                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11857                 }
11858
11859                 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11860
11861                         if (IS_G4X(dev_priv)) {
11862                                 drm_dbg_kms(&dev_priv->drm,
11863                                             "probing HDMI on SDVOC\n");
11864                                 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11865                         }
11866                         if (IS_G4X(dev_priv))
11867                                 g4x_dp_init(dev_priv, DP_C, PORT_C);
11868                 }
11869
11870                 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11871                         g4x_dp_init(dev_priv, DP_D, PORT_D);
11872
11873                 if (SUPPORTS_TV(dev_priv))
11874                         intel_tv_init(dev_priv);
11875         } else if (IS_GEN(dev_priv, 2)) {
11876                 if (IS_I85X(dev_priv))
11877                         intel_lvds_init(dev_priv);
11878
11879                 intel_crt_init(dev_priv);
11880                 intel_dvo_init(dev_priv);
11881         }
11882
11883         for_each_intel_encoder(&dev_priv->drm, encoder) {
11884                 encoder->base.possible_crtcs =
11885                         intel_encoder_possible_crtcs(encoder);
11886                 encoder->base.possible_clones =
11887                         intel_encoder_possible_clones(encoder);
11888         }
11889
11890         intel_init_pch_refclk(dev_priv);
11891
11892         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11893 }
11894
11895 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11896 {
11897         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11898
11899         drm_framebuffer_cleanup(fb);
11900         intel_frontbuffer_put(intel_fb->frontbuffer);
11901
11902         kfree(intel_fb);
11903 }
11904
11905 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11906                                                 struct drm_file *file,
11907                                                 unsigned int *handle)
11908 {
11909         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11910         struct drm_i915_private *i915 = to_i915(obj->base.dev);
11911
11912         if (obj->userptr.mm) {
11913                 drm_dbg(&i915->drm,
11914                         "attempting to use a userptr for a framebuffer, denied\n");
11915                 return -EINVAL;
11916         }
11917
11918         return drm_gem_handle_create(file, &obj->base, handle);
11919 }
11920
11921 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11922                                         struct drm_file *file,
11923                                         unsigned flags, unsigned color,
11924                                         struct drm_clip_rect *clips,
11925                                         unsigned num_clips)
11926 {
11927         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11928
11929         i915_gem_object_flush_if_display(obj);
11930         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11931
11932         return 0;
11933 }
11934
/* Framebuffer vfuncs used by intel_framebuffer_init() below. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
11940
/*
 * Validate @mode_cmd against this platform's limits and bind @obj to
 * @intel_fb. On success the framebuffer is registered with the DRM core
 * and holds a frontbuffer tracking reference; on failure that reference
 * is dropped and a negative errno is returned.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fence tiling/stride under the object lock. */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* No modifier supplied: derive one from the fence tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* Reject format/modifier combos that no plane can scan out. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	/* Pitch limit depends on format and modifier. */
	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks; all planes must share one GEM object. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* gen12 CCS aux planes (except clear color) have a fixed pitch. */
		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Drop the frontbuffer reference taken at the top on any failure. */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
12086
12087 static struct drm_framebuffer *
12088 intel_user_framebuffer_create(struct drm_device *dev,
12089                               struct drm_file *filp,
12090                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
12091 {
12092         struct drm_framebuffer *fb;
12093         struct drm_i915_gem_object *obj;
12094         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
12095
12096         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
12097         if (!obj)
12098                 return ERR_PTR(-ENOENT);
12099
12100         fb = intel_framebuffer_create(obj, &mode_cmd);
12101         i915_gem_object_put(obj);
12102
12103         return fb;
12104 }
12105
/*
 * Device-wide mode validation: reject flags the hardware can't do and
 * modes exceeding the transcoder timing limits of this platform. The
 * order of checks determines which MODE_* status userspace sees.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	/* Composite sync flavours are rejected. */
	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	/* Sync positions are bounded by the total limits, too. */
	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size / blanking differ for gen5+ vs. earlier. */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
12196
12197 enum drm_mode_status
12198 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
12199                                 const struct drm_display_mode *mode,
12200                                 bool bigjoiner)
12201 {
12202         int plane_width_max, plane_height_max;
12203
12204         /*
12205          * intel_mode_valid() should be
12206          * sufficient on older platforms.
12207          */
12208         if (INTEL_GEN(dev_priv) < 9)
12209                 return MODE_OK;
12210
12211         /*
12212          * Most people will probably want a fullscreen
12213          * plane so let's not advertize modes that are
12214          * too big for that.
12215          */
12216         if (INTEL_GEN(dev_priv) >= 11) {
12217                 plane_width_max = 5120 << bigjoiner;
12218                 plane_height_max = 4320;
12219         } else {
12220                 plane_width_max = 5120;
12221                 plane_height_max = 4096;
12222         }
12223
12224         if (mode->hdisplay > plane_width_max)
12225                 return MODE_H_ILLEGAL;
12226
12227         if (mode->vdisplay > plane_height_max)
12228                 return MODE_V_ILLEGAL;
12229
12230         return MODE_OK;
12231 }
12232
/* Device-wide mode config vfuncs; i915 uses the full atomic interfaces. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
12244
12245 /**
12246  * intel_init_display_hooks - initialize the display modesetting hooks
12247  * @dev_priv: device private
12248  */
12249 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
12250 {
12251         intel_init_cdclk_hooks(dev_priv);
12252         intel_init_audio_hooks(dev_priv);
12253
12254         intel_dpll_init_clock_hook(dev_priv);
12255
12256         if (INTEL_GEN(dev_priv) >= 9) {
12257                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12258                 dev_priv->display.crtc_enable = hsw_crtc_enable;
12259                 dev_priv->display.crtc_disable = hsw_crtc_disable;
12260         } else if (HAS_DDI(dev_priv)) {
12261                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
12262                 dev_priv->display.crtc_enable = hsw_crtc_enable;
12263                 dev_priv->display.crtc_disable = hsw_crtc_disable;
12264         } else if (HAS_PCH_SPLIT(dev_priv)) {
12265                 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
12266                 dev_priv->display.crtc_enable = ilk_crtc_enable;
12267                 dev_priv->display.crtc_disable = ilk_crtc_disable;
12268         } else if (IS_CHERRYVIEW(dev_priv) ||
12269                    IS_VALLEYVIEW(dev_priv)) {
12270                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12271                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
12272                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12273         } else {
12274                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12275                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
12276                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12277         }
12278
12279         intel_fdi_init_hook(dev_priv);
12280
12281         if (INTEL_GEN(dev_priv) >= 9) {
12282                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
12283                 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
12284         } else {
12285                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
12286                 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
12287         }
12288
12289 }
12290
12291 void intel_modeset_init_hw(struct drm_i915_private *i915)
12292 {
12293         struct intel_cdclk_state *cdclk_state =
12294                 to_intel_cdclk_state(i915->cdclk.obj.state);
12295
12296         intel_update_cdclk(i915);
12297         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
12298         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
12299 }
12300
12301 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
12302 {
12303         struct drm_plane *plane;
12304         struct intel_crtc *crtc;
12305
12306         for_each_intel_crtc(state->dev, crtc) {
12307                 struct intel_crtc_state *crtc_state;
12308
12309                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
12310                 if (IS_ERR(crtc_state))
12311                         return PTR_ERR(crtc_state);
12312
12313                 if (crtc_state->hw.active) {
12314                         /*
12315                          * Preserve the inherited flag to avoid
12316                          * taking the full modeset path.
12317                          */
12318                         crtc_state->inherited = true;
12319                 }
12320         }
12321
12322         drm_for_each_plane(plane, state->dev) {
12323                 struct drm_plane_state *plane_state;
12324
12325                 plane_state = drm_atomic_get_plane_state(state, plane);
12326                 if (IS_ERR(plane_state))
12327                         return PTR_ERR(plane_state);
12328         }
12329
12330         return 0;
12331 }
12332
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		/* Mirror the result into the currently committed state. */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* On modeset-lock contention, drop everything and start over. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
12418
12419 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12420 {
12421         if (IS_IRONLAKE(dev_priv)) {
12422                 u32 fdi_pll_clk =
12423                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12424
12425                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12426         } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
12427                 dev_priv->fdi_pll_freq = 270000;
12428         } else {
12429                 return;
12430         }
12431
12432         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12433 }
12434
/*
 * Commit the state inherited from the BIOS/GOP at driver load, so the
 * hardware and our software state agree before the first real commit
 * from userspace. Returns 0 or a negative errno.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		/* Only active pipes need anything beyond being in the state. */
		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Encoders that fail their initial fastset check get
			 * their connectors pulled into the commit as well.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard modeset-lock deadlock avoidance: back off and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
12513
12514 static void intel_mode_config_init(struct drm_i915_private *i915)
12515 {
12516         struct drm_mode_config *mode_config = &i915->drm.mode_config;
12517
12518         drm_mode_config_init(&i915->drm);
12519         INIT_LIST_HEAD(&i915->global_obj_list);
12520
12521         mode_config->min_width = 0;
12522         mode_config->min_height = 0;
12523
12524         mode_config->preferred_depth = 24;
12525         mode_config->prefer_shadow = 1;
12526
12527         mode_config->allow_fb_modifiers = true;
12528
12529         mode_config->funcs = &intel_mode_funcs;
12530
12531         mode_config->async_page_flip = has_async_flips(i915);
12532
12533         /*
12534          * Maximum framebuffer dimensions, chosen to match
12535          * the maximum render engine surface size on gen4+.
12536          */
12537         if (INTEL_GEN(i915) >= 7) {
12538                 mode_config->max_width = 16384;
12539                 mode_config->max_height = 16384;
12540         } else if (INTEL_GEN(i915) >= 4) {
12541                 mode_config->max_width = 8192;
12542                 mode_config->max_height = 8192;
12543         } else if (IS_GEN(i915, 3)) {
12544                 mode_config->max_width = 4096;
12545                 mode_config->max_height = 4096;
12546         } else {
12547                 mode_config->max_width = 2048;
12548                 mode_config->max_height = 2048;
12549         }
12550
12551         if (IS_I845G(i915) || IS_I865G(i915)) {
12552                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12553                 mode_config->cursor_height = 1023;
12554         } else if (IS_I830(i915) || IS_I85X(i915) ||
12555                    IS_I915G(i915) || IS_I915GM(i915)) {
12556                 mode_config->cursor_width = 64;
12557                 mode_config->cursor_height = 64;
12558         } else {
12559                 mode_config->cursor_width = 256;
12560                 mode_config->cursor_height = 256;
12561         }
12562 }
12563
/*
 * Tear down what intel_mode_config_init() set up: the global atomic
 * state objects first, then the core DRM mode config.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
12569
12570 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12571 {
12572         if (plane_config->fb) {
12573                 struct drm_framebuffer *fb = &plane_config->fb->base;
12574
12575                 /* We may only have the stub and not a full framebuffer */
12576                 if (drm_framebuffer_read_refcount(fb))
12577                         drm_framebuffer_put(fb);
12578                 else
12579                         kfree(fb);
12580         }
12581
12582         if (plane_config->vma)
12583                 i915_vma_put(plane_config->vma);
12584 }
12585
/*
 * part #1: call before irq install
 *
 * Set up everything display needs that must exist before interrupts
 * are enabled: vblank support, VBT parsing, VGA client registration,
 * power domains, CSR/DMC firmware, the modeset/flip workqueues, the
 * mode config and the cdclk/dbuf/bw global state objects.
 *
 * Returns 0 on success or a negative error code; on failure the
 * partially initialized state is torn down via the cleanup labels.
 */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	/* Fault-injection hook for probe testing. */
	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	/* Parse the VBT before anything that depends on its contents. */
	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	/*
	 * An ordered wq serializes modeset work; the flip wq services
	 * page flips with high priority.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	intel_mode_config_init(i915);

	/* Global atomic state objects: cdclk, dbuf and bandwidth. */
	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
12651
/*
 * part #2: call after irq install, but before gem init
 *
 * Brings up the CRTCs, shared DPLLs and outputs, takes over the
 * hardware state left by the BIOS, and reconstructs the initial
 * plane/framebuffer configuration so the BIOS fb can be reused.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				/* Unwind the mode config set up in part #1. */
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out and sanitize whatever state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
12744
/*
 * part #3: call after gem init
 *
 * Performs the initial atomic commit, sets up the overlay and fbdev
 * emulation, and finally enables hotplug handling. Does nothing when
 * the device has no display.
 *
 * Returns 0 on success or a negative error code from fbdev init.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		/* Non-fatal: log and keep going with init. */
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
12777
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 mode and no
 * attached output ("force quirk", see the debug message below). The
 * DPLL programming sequence follows the hardware's required
 * VGA-mode-first ordering.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check the divider choice against the expected dotclock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Program the fixed 640x480 timings and pipe source size. */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* The pipe is up once its scanline counter starts moving. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
12850
/*
 * Counterpart to i830_enable_pipe(): turn off a pipe that was
 * force-enabled by the quirk. All planes and cursors are expected to
 * be off already (WARNed about below) before the pipe and its DPLL
 * are disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* No plane or cursor may still be enabled at this point. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* The pipe is down once its scanline counter stops. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Only then is it safe to shut down the DPLL. */
	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
12880
12881 static void
12882 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12883 {
12884         struct intel_crtc *crtc;
12885
12886         if (INTEL_GEN(dev_priv) >= 4)
12887                 return;
12888
12889         for_each_intel_crtc(&dev_priv->drm, crtc) {
12890                 struct intel_plane *plane =
12891                         to_intel_plane(crtc->base.primary);
12892                 struct intel_crtc *plane_crtc;
12893                 enum pipe pipe;
12894
12895                 if (!plane->get_hw_state(plane, &pipe))
12896                         continue;
12897
12898                 if (pipe == crtc->pipe)
12899                         continue;
12900
12901                 drm_dbg_kms(&dev_priv->drm,
12902                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12903                             plane->base.base.id, plane->base.name);
12904
12905                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12906                 intel_plane_disable_noatomic(plane_crtc, plane);
12907         }
12908 }
12909
12910 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12911 {
12912         struct drm_device *dev = crtc->base.dev;
12913         struct intel_encoder *encoder;
12914
12915         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12916                 return true;
12917
12918         return false;
12919 }
12920
12921 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12922 {
12923         struct drm_device *dev = encoder->base.dev;
12924         struct intel_connector *connector;
12925
12926         for_each_connector_on_encoder(dev, &encoder->base, connector)
12927                 return connector;
12928
12929         return NULL;
12930 }
12931
12932 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12933                               enum pipe pch_transcoder)
12934 {
12935         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12936                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12937 }
12938
/*
 * Program our chosen frame start delay (dev_priv->framestart_delay,
 * range 1-4) into the CPU transcoder and, for PCH-attached pipes,
 * the PCH transcoder, overwriting whatever the BIOS left there.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* HSW/BDW/gen9+ use a CHICKEN_TRANS field for the delay. */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders are deliberately not touched here. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms carry the field in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	/* The PCH transcoder has its own frame start delay field. */
	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
12989
/*
 * Bring the crtc's hardware state, as read out from the BIOS, into a
 * shape the driver can work with: clear debug frame start delays,
 * disable all non-primary planes, disable the pipe entirely if it has
 * no encoders, and set up FIFO underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
13056
13057 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
13058 {
13059         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
13060
13061         /*
13062          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
13063          * the hardware when a high res displays plugged in. DPLL P
13064          * divider is zero, and the pipe timings are bonkers. We'll
13065          * try to disable everything in that case.
13066          *
13067          * FIXME would be nice to be able to sanitize this state
13068          * without several WARNs, but for now let's take the easy
13069          * road.
13070          */
13071         return IS_SANDYBRIDGE(dev_priv) &&
13072                 crtc_state->hw.active &&
13073                 crtc_state->shared_dpll &&
13074                 crtc_state->port_clock == 0;
13075 }
13076
/*
 * Sanitize an encoder's state after hardware readout: if the encoder
 * has active connectors but no active pipe (or a known-bogus SNB DPLL
 * config), manually run its disable hooks and clamp the associated
 * connector state to off, then notify opregion of the result.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		/* Treat the pipe as inactive so it gets disabled below. */
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* Restore the temporarily overridden best_encoder. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
13147
13148 /* FIXME read out full plane state for all planes */
13149 static void readout_plane_state(struct drm_i915_private *dev_priv)
13150 {
13151         struct intel_plane *plane;
13152         struct intel_crtc *crtc;
13153
13154         for_each_intel_plane(&dev_priv->drm, plane) {
13155                 struct intel_plane_state *plane_state =
13156                         to_intel_plane_state(plane->base.state);
13157                 struct intel_crtc_state *crtc_state;
13158                 enum pipe pipe = PIPE_A;
13159                 bool visible;
13160
13161                 visible = plane->get_hw_state(plane, &pipe);
13162
13163                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13164                 crtc_state = to_intel_crtc_state(crtc->base.state);
13165
13166                 intel_set_plane_visible(crtc_state, plane_state, visible);
13167
13168                 drm_dbg_kms(&dev_priv->drm,
13169                             "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
13170                             plane->base.base.id, plane->base.name,
13171                             enableddisabled(visible), pipe_name(pipe));
13172         }
13173
13174         for_each_intel_crtc(&dev_priv->drm, crtc) {
13175                 struct intel_crtc_state *crtc_state =
13176                         to_intel_crtc_state(crtc->base.state);
13177
13178                 fixup_plane_bitmasks(crtc_state);
13179         }
13180 }
13181
13182 static void intel_modeset_readout_hw_state(struct drm_device *dev)
13183 {
13184         struct drm_i915_private *dev_priv = to_i915(dev);
13185         struct intel_cdclk_state *cdclk_state =
13186                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
13187         struct intel_dbuf_state *dbuf_state =
13188                 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
13189         enum pipe pipe;
13190         struct intel_crtc *crtc;
13191         struct intel_encoder *encoder;
13192         struct intel_connector *connector;
13193         struct drm_connector_list_iter conn_iter;
13194         u8 active_pipes = 0;
13195
13196         for_each_intel_crtc(dev, crtc) {
13197                 struct intel_crtc_state *crtc_state =
13198                         to_intel_crtc_state(crtc->base.state);
13199
13200                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
13201                 intel_crtc_free_hw_state(crtc_state);
13202                 intel_crtc_state_reset(crtc_state, crtc);
13203
13204                 intel_crtc_get_pipe_config(crtc_state);
13205
13206                 crtc_state->hw.enable = crtc_state->hw.active;
13207
13208                 crtc->base.enabled = crtc_state->hw.enable;
13209                 crtc->active = crtc_state->hw.active;
13210
13211                 if (crtc_state->hw.active)
13212                         active_pipes |= BIT(crtc->pipe);
13213
13214                 drm_dbg_kms(&dev_priv->drm,
13215                             "[CRTC:%d:%s] hw state readout: %s\n",
13216                             crtc->base.base.id, crtc->base.name,
13217                             enableddisabled(crtc_state->hw.active));
13218         }
13219
13220         dev_priv->active_pipes = cdclk_state->active_pipes =
13221                 dbuf_state->active_pipes = active_pipes;
13222
13223         readout_plane_state(dev_priv);
13224
13225         for_each_intel_encoder(dev, encoder) {
13226                 pipe = 0;
13227
13228                 if (encoder->get_hw_state(encoder, &pipe)) {
13229                         struct intel_crtc_state *crtc_state;
13230
13231                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
13232                         crtc_state = to_intel_crtc_state(crtc->base.state);
13233
13234                         encoder->base.crtc = &crtc->base;
13235                         intel_encoder_get_config(encoder, crtc_state);
13236                         if (encoder->sync_state)
13237                                 encoder->sync_state(encoder, crtc_state);
13238
13239                         /* read out to slave crtc as well for bigjoiner */
13240                         if (crtc_state->bigjoiner) {
13241                                 /* encoder should read be linked to bigjoiner master */
13242                                 WARN_ON(crtc_state->bigjoiner_slave);
13243
13244                                 crtc = crtc_state->bigjoiner_linked_crtc;
13245                                 crtc_state = to_intel_crtc_state(crtc->base.state);
13246                                 intel_encoder_get_config(encoder, crtc_state);
13247                         }
13248                 } else {
13249                         encoder->base.crtc = NULL;
13250                 }
13251
13252                 drm_dbg_kms(&dev_priv->drm,
13253                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
13254                             encoder->base.base.id, encoder->base.name,
13255                             enableddisabled(encoder->base.crtc),
13256                             pipe_name(pipe));
13257         }
13258
13259         intel_dpll_readout_hw_state(dev_priv);
13260
13261         drm_connector_list_iter_begin(dev, &conn_iter);
13262         for_each_intel_connector_iter(connector, &conn_iter) {
13263                 if (connector->get_hw_state(connector)) {
13264                         struct intel_crtc_state *crtc_state;
13265                         struct intel_crtc *crtc;
13266
13267                         connector->base.dpms = DRM_MODE_DPMS_ON;
13268
13269                         encoder = intel_attached_encoder(connector);
13270                         connector->base.encoder = &encoder->base;
13271
13272                         crtc = to_intel_crtc(encoder->base.crtc);
13273                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
13274
13275                         if (crtc_state && crtc_state->hw.active) {
13276                                 /*
13277                                  * This has to be done during hardware readout
13278                                  * because anything calling .crtc_disable may
13279                                  * rely on the connector_mask being accurate.
13280                                  */
13281                                 crtc_state->uapi.connector_mask |=
13282                                         drm_connector_mask(&connector->base);
13283                                 crtc_state->uapi.encoder_mask |=
13284                                         drm_encoder_mask(&encoder->base);
13285                         }
13286                 } else {
13287                         connector->base.dpms = DRM_MODE_DPMS_OFF;
13288                         connector->base.encoder = NULL;
13289                 }
13290                 drm_dbg_kms(&dev_priv->drm,
13291                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
13292                             connector->base.base.id, connector->base.name,
13293                             enableddisabled(connector->base.encoder));
13294         }
13295         drm_connector_list_iter_end(&conn_iter);
13296
13297         for_each_intel_crtc(dev, crtc) {
13298                 struct intel_bw_state *bw_state =
13299                         to_intel_bw_state(dev_priv->bw_obj.state);
13300                 struct intel_crtc_state *crtc_state =
13301                         to_intel_crtc_state(crtc->base.state);
13302                 struct intel_plane *plane;
13303                 int min_cdclk = 0;
13304
13305                 if (crtc_state->bigjoiner_slave)
13306                         continue;
13307
13308                 if (crtc_state->hw.active) {
13309                         /*
13310                          * The initial mode needs to be set in order to keep
13311                          * the atomic core happy. It wants a valid mode if the
13312                          * crtc's enabled, so we do the above call.
13313                          *
13314                          * But we don't set all the derived state fully, hence
13315                          * set a flag to indicate that a full recalculation is
13316                          * needed on the next commit.
13317                          */
13318                         crtc_state->inherited = true;
13319
13320                         intel_crtc_update_active_timings(crtc_state);
13321
13322                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
13323                 }
13324
13325                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13326                         const struct intel_plane_state *plane_state =
13327                                 to_intel_plane_state(plane->base.state);
13328
13329                         /*
13330                          * FIXME don't have the fb yet, so can't
13331                          * use intel_plane_data_rate() :(
13332                          */
13333                         if (plane_state->uapi.visible)
13334                                 crtc_state->data_rate[plane->id] =
13335                                         4 * crtc_state->pixel_rate;
13336                         /*
13337                          * FIXME don't have the fb yet, so can't
13338                          * use plane->min_cdclk() :(
13339                          */
13340                         if (plane_state->uapi.visible && plane->min_cdclk) {
13341                                 if (crtc_state->double_wide ||
13342                                     INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
13343                                         crtc_state->min_cdclk[plane->id] =
13344                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
13345                                 else
13346                                         crtc_state->min_cdclk[plane->id] =
13347                                                 crtc_state->pixel_rate;
13348                         }
13349                         drm_dbg_kms(&dev_priv->drm,
13350                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
13351                                     plane->base.base.id, plane->base.name,
13352                                     crtc_state->min_cdclk[plane->id]);
13353                 }
13354
13355                 if (crtc_state->hw.active) {
13356                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
13357                         if (drm_WARN_ON(dev, min_cdclk < 0))
13358                                 min_cdclk = 0;
13359                 }
13360
13361                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13362                 cdclk_state->min_voltage_level[crtc->pipe] =
13363                         crtc_state->min_voltage_level;
13364
13365                 intel_bw_crtc_update(bw_state, crtc_state);
13366
13367                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
13368
13369                 /* discard our incomplete slave state, copy it from master */
13370                 if (crtc_state->bigjoiner && crtc_state->hw.active) {
13371                         struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13372                         struct intel_crtc_state *slave_crtc_state =
13373                                 to_intel_crtc_state(slave->base.state);
13374
13375                         copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13376                         slave->base.mode = crtc->base.mode;
13377
13378                         cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13379                         cdclk_state->min_voltage_level[slave->pipe] =
13380                                 crtc_state->min_voltage_level;
13381
13382                         for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13383                                 const struct intel_plane_state *plane_state =
13384                                         to_intel_plane_state(plane->base.state);
13385
13386                                 /*
13387                                  * FIXME don't have the fb yet, so can't
13388                                  * use intel_plane_data_rate() :(
13389                                  */
13390                                 if (plane_state->uapi.visible)
13391                                         crtc_state->data_rate[plane->id] =
13392                                                 4 * crtc_state->pixel_rate;
13393                                 else
13394                                         crtc_state->data_rate[plane->id] = 0;
13395                         }
13396
13397                         intel_bw_crtc_update(bw_state, slave_crtc_state);
13398                         drm_calc_timestamping_constants(&slave->base,
13399                                                         &slave_crtc_state->hw.adjusted_mode);
13400                 }
13401         }
13402 }
13403
13404 static void
13405 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13406 {
13407         struct intel_encoder *encoder;
13408
13409         for_each_intel_encoder(&dev_priv->drm, encoder) {
13410                 struct intel_crtc_state *crtc_state;
13411
13412                 if (!encoder->get_power_domains)
13413                         continue;
13414
13415                 /*
13416                  * MST-primary and inactive encoders don't have a crtc state
13417                  * and neither of these require any power domain references.
13418                  */
13419                 if (!encoder->base.crtc)
13420                         continue;
13421
13422                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13423                 encoder->get_power_domains(encoder, crtc_state);
13424         }
13425 }
13426
13427 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13428 {
13429         /*
13430          * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
13431          * Also known as Wa_14010480278.
13432          */
13433         if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
13434                 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13435                                intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
13436
13437         if (IS_HASWELL(dev_priv)) {
13438                 /*
13439                  * WaRsPkgCStateDisplayPMReq:hsw
13440                  * System hang if this isn't done before disabling all planes!
13441                  */
13442                 intel_de_write(dev_priv, CHICKEN_PAR1_1,
13443                                intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13444         }
13445
13446         if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13447                 /* Display WA #1142:kbl,cfl,cml */
13448                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13449                              KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13450                 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13451                              KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13452                              KBL_ARB_FILL_SPARE_14);
13453         }
13454 }
13455
13456 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13457                                        enum port port, i915_reg_t hdmi_reg)
13458 {
13459         u32 val = intel_de_read(dev_priv, hdmi_reg);
13460
13461         if (val & SDVO_ENABLE ||
13462             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13463                 return;
13464
13465         drm_dbg_kms(&dev_priv->drm,
13466                     "Sanitizing transcoder select for HDMI %c\n",
13467                     port_name(port));
13468
13469         val &= ~SDVO_PIPE_SEL_MASK;
13470         val |= SDVO_PIPE_SEL(PIPE_A);
13471
13472         intel_de_write(dev_priv, hdmi_reg, val);
13473 }
13474
13475 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13476                                      enum port port, i915_reg_t dp_reg)
13477 {
13478         u32 val = intel_de_read(dev_priv, dp_reg);
13479
13480         if (val & DP_PORT_EN ||
13481             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13482                 return;
13483
13484         drm_dbg_kms(&dev_priv->drm,
13485                     "Sanitizing transcoder select for DP %c\n",
13486                     port_name(port));
13487
13488         val &= ~DP_PIPE_SEL_MASK;
13489         val |= DP_PIPE_SEL(PIPE_A);
13490
13491         intel_de_write(dev_priv, dp_reg, val);
13492 }
13493
13494 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13495 {
13496         /*
13497          * The BIOS may select transcoder B on some of the PCH
13498          * ports even it doesn't enable the port. This would trip
13499          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13500          * Sanitize the transcoder select bits to prevent that. We
13501          * assume that the BIOS never actually enabled the port,
13502          * because if it did we'd actually have to toggle the port
13503          * on and back off to make the transcoder A select stick
13504          * (see. intel_dp_link_down(), intel_disable_hdmi(),
13505          * intel_disable_sdvo()).
13506          */
13507         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13508         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13509         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13510
13511         /* PCH SDVOB multiplex with HDMIB */
13512         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13513         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13514         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13515 }
13516
/*
 * Scan out the current hw modeset state and sanitize it to a consistent
 * software state. The steps below are order-dependent; see the inline
 * comments before reordering anything.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Hold a display power reference for the whole readout/sanitize. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and where supported, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * After sanitizing, no crtc should still be holding power domain
	 * references here; warn and drop any that remain.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
13606
13607 void intel_display_resume(struct drm_device *dev)
13608 {
13609         struct drm_i915_private *dev_priv = to_i915(dev);
13610         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13611         struct drm_modeset_acquire_ctx ctx;
13612         int ret;
13613
13614         dev_priv->modeset_restore_state = NULL;
13615         if (state)
13616                 state->acquire_ctx = &ctx;
13617
13618         drm_modeset_acquire_init(&ctx, 0);
13619
13620         while (1) {
13621                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
13622                 if (ret != -EDEADLK)
13623                         break;
13624
13625                 drm_modeset_backoff(&ctx);
13626         }
13627
13628         if (!ret)
13629                 ret = __intel_display_resume(dev, state, &ctx);
13630
13631         intel_enable_ipc(dev_priv);
13632         drm_modeset_drop_locks(&ctx);
13633         drm_modeset_acquire_fini(&ctx);
13634
13635         if (ret)
13636                 drm_err(&dev_priv->drm,
13637                         "Restoring old state failed with %i\n", ret);
13638         if (state)
13639                 drm_atomic_state_put(state);
13640 }
13641
13642 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13643 {
13644         struct intel_connector *connector;
13645         struct drm_connector_list_iter conn_iter;
13646
13647         /* Kill all the work that may have been queued by hpd. */
13648         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13649         for_each_intel_connector_iter(connector, &conn_iter) {
13650                 if (connector->modeset_retry_work.func)
13651                         cancel_work_sync(&connector->modeset_retry_work);
13652                 if (connector->hdcp.shim) {
13653                         cancel_delayed_work_sync(&connector->hdcp.check_work);
13654                         cancel_work_sync(&connector->hdcp.prop_work);
13655                 }
13656         }
13657         drm_connector_list_iter_end(&conn_iter);
13658 }
13659
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Let any pending flips/modesets on the dedicated queues finish. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/*
	 * Flush the deferred-free worker; with all commits flushed, the
	 * free list must be empty by now.
	 */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
13669
/*
 * part #2: call after irq uninstall
 *
 * The teardown sequence below is order-dependent; see the inline
 * comments before reordering anything.
 */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Queues were already flushed in part #1; safe to destroy now. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
13709
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	/* Release the VBT/opregion data last. */
	intel_bios_driver_remove(i915);
}
13721
/* Register the display-facing userspace interfaces (debugfs, opregion,
 * ACPI video, audio, fbdev, hotplug polling). */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	/* Nothing to expose to userspace without display hardware. */
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
13752
/* Counterpart of intel_display_driver_register(): tear down the
 * userspace-facing display interfaces, roughly in reverse order. */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}
13772
13773 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
13774
/* Snapshot of display registers captured for GPU error state dumps. */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2, captured on HSW/BDW only */
	u32 power_well_driver;

	/* Per-pipe cursor registers (CURCNTR/CURPOS/CURBASE) */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	/* Per-pipe state; registers only read when power_domain_on */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers; which fields are captured
	 * depends on the hardware generation */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* Transcoder timings; slot order must match the transcoders[]
	 * table in intel_display_capture_error_state() (BUILD_BUG_ON
	 * there enforces matching sizes) */
	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
13817
13818 struct intel_display_error_state *
13819 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
13820 {
13821         struct intel_display_error_state *error;
13822         int transcoders[] = {
13823                 TRANSCODER_A,
13824                 TRANSCODER_B,
13825                 TRANSCODER_C,
13826                 TRANSCODER_D,
13827                 TRANSCODER_EDP,
13828         };
13829         int i;
13830
13831         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
13832
13833         if (!HAS_DISPLAY(dev_priv))
13834                 return NULL;
13835
13836         error = kzalloc(sizeof(*error), GFP_ATOMIC);
13837         if (error == NULL)
13838                 return NULL;
13839
13840         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
13841                 error->power_well_driver = intel_de_read(dev_priv,
13842                                                          HSW_PWR_WELL_CTL2);
13843
13844         for_each_pipe(dev_priv, i) {
13845                 error->pipe[i].power_domain_on =
13846                         __intel_display_power_is_enabled(dev_priv,
13847                                                          POWER_DOMAIN_PIPE(i));
13848                 if (!error->pipe[i].power_domain_on)
13849                         continue;
13850
13851                 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
13852                 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
13853                 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
13854
13855                 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
13856                 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
13857                 if (INTEL_GEN(dev_priv) <= 3) {
13858                         error->plane[i].size = intel_de_read(dev_priv,
13859                                                              DSPSIZE(i));
13860                         error->plane[i].pos = intel_de_read(dev_priv,
13861                                                             DSPPOS(i));
13862                 }
13863                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
13864                         error->plane[i].addr = intel_de_read(dev_priv,
13865                                                              DSPADDR(i));
13866                 if (INTEL_GEN(dev_priv) >= 4) {
13867                         error->plane[i].surface = intel_de_read(dev_priv,
13868                                                                 DSPSURF(i));
13869                         error->plane[i].tile_offset = intel_de_read(dev_priv,
13870                                                                     DSPTILEOFF(i));
13871                 }
13872
13873                 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
13874
13875                 if (HAS_GMCH(dev_priv))
13876                         error->pipe[i].stat = intel_de_read(dev_priv,
13877                                                             PIPESTAT(i));
13878         }
13879
13880         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
13881                 enum transcoder cpu_transcoder = transcoders[i];
13882
13883                 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
13884                         continue;
13885
13886                 error->transcoder[i].available = true;
13887                 error->transcoder[i].power_domain_on =
13888                         __intel_display_power_is_enabled(dev_priv,
13889                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13890                 if (!error->transcoder[i].power_domain_on)
13891                         continue;
13892
13893                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
13894
13895                 error->transcoder[i].conf = intel_de_read(dev_priv,
13896                                                           PIPECONF(cpu_transcoder));
13897                 error->transcoder[i].htotal = intel_de_read(dev_priv,
13898                                                             HTOTAL(cpu_transcoder));
13899                 error->transcoder[i].hblank = intel_de_read(dev_priv,
13900                                                             HBLANK(cpu_transcoder));
13901                 error->transcoder[i].hsync = intel_de_read(dev_priv,
13902                                                            HSYNC(cpu_transcoder));
13903                 error->transcoder[i].vtotal = intel_de_read(dev_priv,
13904                                                             VTOTAL(cpu_transcoder));
13905                 error->transcoder[i].vblank = intel_de_read(dev_priv,
13906                                                             VBLANK(cpu_transcoder));
13907                 error->transcoder[i].vsync = intel_de_read(dev_priv,
13908                                                            VSYNC(cpu_transcoder));
13909         }
13910
13911         return error;
13912 }
13913
13914 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13915
/*
 * Pretty-print a previously captured intel_display_error_state into an
 * error state buffer. Safe to call with error == NULL (no-op). The
 * generation checks must mirror those in
 * intel_display_capture_error_state() so only captured fields are
 * printed.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	/* Transcoders not present on this platform were never captured. */
	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
13974
13975 #endif