drm/i915: move is_ccs_modifier to an inline
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_damage_helper.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_display_debugfs.h"
49 #include "display/intel_dp.h"
50 #include "display/intel_dp_mst.h"
51 #include "display/intel_dpll.h"
52 #include "display/intel_dpll_mgr.h"
53 #include "display/intel_dsi.h"
54 #include "display/intel_dvo.h"
55 #include "display/intel_gmbus.h"
56 #include "display/intel_hdmi.h"
57 #include "display/intel_lvds.h"
58 #include "display/intel_sdvo.h"
59 #include "display/intel_tv.h"
60 #include "display/intel_vdsc.h"
61 #include "display/intel_vrr.h"
62
63 #include "gem/i915_gem_object.h"
64
65 #include "gt/intel_rps.h"
66
67 #include "i915_drv.h"
68 #include "i915_trace.h"
69 #include "intel_acpi.h"
70 #include "intel_atomic.h"
71 #include "intel_atomic_plane.h"
72 #include "intel_bw.h"
73 #include "intel_cdclk.h"
74 #include "intel_color.h"
75 #include "intel_crtc.h"
76 #include "intel_csr.h"
77 #include "intel_display_types.h"
78 #include "intel_dp_link_training.h"
79 #include "intel_fbc.h"
80 #include "intel_fdi.h"
81 #include "intel_fbdev.h"
82 #include "intel_fifo_underrun.h"
83 #include "intel_frontbuffer.h"
84 #include "intel_hdcp.h"
85 #include "intel_hotplug.h"
86 #include "intel_overlay.h"
87 #include "intel_pipe_crc.h"
88 #include "intel_pm.h"
89 #include "intel_pps.h"
90 #include "intel_psr.h"
91 #include "intel_quirks.h"
92 #include "intel_sideband.h"
93 #include "intel_sprite.h"
94 #include "intel_tc.h"
95 #include "intel_vga.h"
96 #include "i9xx_plane.h"
97 #include "skl_universal_plane.h"
98
99 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
100                                 struct intel_crtc_state *pipe_config);
101 static void ilk_pch_clock_get(struct intel_crtc *crtc,
102                               struct intel_crtc_state *pipe_config);
103
104 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
105                                   struct drm_i915_gem_object *obj,
106                                   struct drm_mode_fb_cmd2 *mode_cmd);
107 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
108 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
109 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
110                                          const struct intel_link_m_n *m_n,
111                                          const struct intel_link_m_n *m2_n2);
112 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
113 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
114 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
115 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
116 static void vlv_prepare_pll(struct intel_crtc *crtc,
117                             const struct intel_crtc_state *pipe_config);
118 static void chv_prepare_pll(struct intel_crtc *crtc,
119                             const struct intel_crtc_state *pipe_config);
120 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
121 static void intel_modeset_setup_hw_state(struct drm_device *dev,
122                                          struct drm_modeset_acquire_ctx *ctx);
123
124 /* returns HPLL frequency in kHz */
125 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
126 {
127         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
128
129         /* Obtain SKU information */
130         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
131                 CCK_FUSE_HPLL_FREQ_MASK;
132
133         return vco_freq[hpll_freq] * 1000;
134 }
135
136 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
137                       const char *name, u32 reg, int ref_freq)
138 {
139         u32 val;
140         int divider;
141
142         val = vlv_cck_read(dev_priv, reg);
143         divider = val & CCK_FREQUENCY_VALUES;
144
145         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
146                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
147                  "%s change in progress\n", name);
148
149         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
150 }
151
152 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
153                            const char *name, u32 reg)
154 {
155         int hpll;
156
157         vlv_cck_get(dev_priv);
158
159         if (dev_priv->hpll_freq == 0)
160                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
161
162         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
163
164         vlv_cck_put(dev_priv);
165
166         return hpll;
167 }
168
169 static void intel_update_czclk(struct drm_i915_private *dev_priv)
170 {
171         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
172                 return;
173
174         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
175                                                       CCK_CZ_CLOCK_CONTROL);
176
177         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
178                 dev_priv->czclk_freq);
179 }
180
181 /* WA Display #0827: Gen9:all */
182 static void
183 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
184 {
185         if (enable)
186                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
187                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
188         else
189                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
190                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
191 }
192
193 /* Wa_2006604312:icl,ehl */
194 static void
195 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
196                        bool enable)
197 {
198         if (enable)
199                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
200                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
201         else
202                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
203                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
204 }
205
/* A port sync slave has a valid master transcoder recorded in its state. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
211
/* A port sync master has at least one slave transcoder in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
217
218 bool
219 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
220 {
221         return is_trans_port_sync_master(crtc_state) ||
222                 is_trans_port_sync_slave(crtc_state);
223 }
224
225 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
226                                     enum pipe pipe)
227 {
228         i915_reg_t reg = PIPEDSL(pipe);
229         u32 line1, line2;
230         u32 line_mask;
231
232         if (IS_GEN(dev_priv, 2))
233                 line_mask = DSL_LINEMASK_GEN2;
234         else
235                 line_mask = DSL_LINEMASK_GEN3;
236
237         line1 = intel_de_read(dev_priv, reg) & line_mask;
238         msleep(5);
239         line2 = intel_de_read(dev_priv, reg) & line_mask;
240
241         return line1 != line2;
242 }
243
244 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
245 {
246         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
247         enum pipe pipe = crtc->pipe;
248
249         /* Wait for the display line to settle/start moving */
250         if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
251                 drm_err(&dev_priv->drm,
252                         "pipe %c scanline %s wait timed out\n",
253                         pipe_name(pipe), onoff(state));
254 }
255
/* Block until the pipe's scanline counter stops advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
260
/* Block until the pipe's scanline counter starts advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
265
266 static void
267 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
268 {
269         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
270         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
271
272         if (INTEL_GEN(dev_priv) >= 4) {
273                 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
274                 i915_reg_t reg = PIPECONF(cpu_transcoder);
275
276                 /* Wait for the Pipe State to go off */
277                 if (intel_de_wait_for_clear(dev_priv, reg,
278                                             I965_PIPECONF_ACTIVE, 100))
279                         drm_WARN(&dev_priv->drm, 1,
280                                  "pipe_off wait timed out\n");
281         } else {
282                 intel_wait_for_pipe_scanline_stopped(crtc);
283         }
284 }
285
286 /* Only for pre-ILK configs */
287 void assert_pll(struct drm_i915_private *dev_priv,
288                 enum pipe pipe, bool state)
289 {
290         u32 val;
291         bool cur_state;
292
293         val = intel_de_read(dev_priv, DPLL(pipe));
294         cur_state = !!(val & DPLL_VCO_ENABLE);
295         I915_STATE_WARN(cur_state != state,
296              "PLL state assertion failure (expected %s, current %s)\n",
297                         onoff(state), onoff(cur_state));
298 }
299
300 /* XXX: the dsi pll is shared between MIPI DSI ports */
301 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
302 {
303         u32 val;
304         bool cur_state;
305
306         vlv_cck_get(dev_priv);
307         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
308         vlv_cck_put(dev_priv);
309
310         cur_state = val & DSI_PLL_VCO_EN;
311         I915_STATE_WARN(cur_state != state,
312              "DSI PLL state assertion failure (expected %s, current %s)\n",
313                         onoff(state), onoff(cur_state));
314 }
315
316 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
317                           enum pipe pipe, bool state)
318 {
319         bool cur_state;
320
321         if (HAS_DDI(dev_priv)) {
322                 /*
323                  * DDI does not have a specific FDI_TX register.
324                  *
325                  * FDI is never fed from EDP transcoder
326                  * so pipe->transcoder cast is fine here.
327                  */
328                 enum transcoder cpu_transcoder = (enum transcoder)pipe;
329                 u32 val = intel_de_read(dev_priv,
330                                         TRANS_DDI_FUNC_CTL(cpu_transcoder));
331                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
332         } else {
333                 u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
334                 cur_state = !!(val & FDI_TX_ENABLE);
335         }
336         I915_STATE_WARN(cur_state != state,
337              "FDI TX state assertion failure (expected %s, current %s)\n",
338                         onoff(state), onoff(cur_state));
339 }
340 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
341 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
342
343 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
344                           enum pipe pipe, bool state)
345 {
346         u32 val;
347         bool cur_state;
348
349         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
350         cur_state = !!(val & FDI_RX_ENABLE);
351         I915_STATE_WARN(cur_state != state,
352              "FDI RX state assertion failure (expected %s, current %s)\n",
353                         onoff(state), onoff(cur_state));
354 }
355 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
356 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
357
358 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
359                                       enum pipe pipe)
360 {
361         u32 val;
362
363         /* ILK FDI PLL is always enabled */
364         if (IS_GEN(dev_priv, 5))
365                 return;
366
367         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
368         if (HAS_DDI(dev_priv))
369                 return;
370
371         val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
372         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
373 }
374
375 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
376                        enum pipe pipe, bool state)
377 {
378         u32 val;
379         bool cur_state;
380
381         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
382         cur_state = !!(val & FDI_RX_PLL_ENABLE);
383         I915_STATE_WARN(cur_state != state,
384              "FDI RX PLL assertion failure (expected %s, current %s)\n",
385                         onoff(state), onoff(cur_state));
386 }
387
/*
 * Warn if the panel power sequencer registers for @pipe are locked while
 * the panel is powered on. Which PPS instance and which pipe it drives
 * depends on the platform's panel port select.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms manage PPS elsewhere; this check doesn't apply. */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the selected panel port to the pipe driving it. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH-split: single PPS instance, LVDS only. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* "Unlocked" means panel off, or the unlock magic is written. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
445
446 void assert_pipe(struct drm_i915_private *dev_priv,
447                  enum transcoder cpu_transcoder, bool state)
448 {
449         bool cur_state;
450         enum intel_display_power_domain power_domain;
451         intel_wakeref_t wakeref;
452
453         /* we keep both pipes enabled on 830 */
454         if (IS_I830(dev_priv))
455                 state = true;
456
457         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
458         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
459         if (wakeref) {
460                 u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
461                 cur_state = !!(val & PIPECONF_ENABLE);
462
463                 intel_display_power_put(dev_priv, power_domain, wakeref);
464         } else {
465                 cur_state = false;
466         }
467
468         I915_STATE_WARN(cur_state != state,
469                         "transcoder %s assertion failure (expected %s, current %s)\n",
470                         transcoder_name(cpu_transcoder),
471                         onoff(state), onoff(cur_state));
472 }
473
474 static void assert_plane(struct intel_plane *plane, bool state)
475 {
476         enum pipe pipe;
477         bool cur_state;
478
479         cur_state = plane->get_hw_state(plane, &pipe);
480
481         I915_STATE_WARN(cur_state != state,
482                         "%s assertion failure (expected %s, current %s)\n",
483                         plane->base.name, onoff(state), onoff(cur_state));
484 }
485
486 #define assert_plane_enabled(p) assert_plane(p, true)
487 #define assert_plane_disabled(p) assert_plane(p, false)
488
489 static void assert_planes_disabled(struct intel_crtc *crtc)
490 {
491         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
492         struct intel_plane *plane;
493
494         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
495                 assert_plane_disabled(plane);
496 }
497
498 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
499                                     enum pipe pipe)
500 {
501         u32 val;
502         bool enabled;
503
504         val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
505         enabled = !!(val & TRANS_ENABLE);
506         I915_STATE_WARN(enabled,
507              "transcoder assertion failed, should be off on pipe %c but is still active\n",
508              pipe_name(pipe));
509 }
510
511 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
512                                    enum pipe pipe, enum port port,
513                                    i915_reg_t dp_reg)
514 {
515         enum pipe port_pipe;
516         bool state;
517
518         state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
519
520         I915_STATE_WARN(state && port_pipe == pipe,
521                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
522                         port_name(port), pipe_name(pipe));
523
524         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
525                         "IBX PCH DP %c still using transcoder B\n",
526                         port_name(port));
527 }
528
529 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
530                                      enum pipe pipe, enum port port,
531                                      i915_reg_t hdmi_reg)
532 {
533         enum pipe port_pipe;
534         bool state;
535
536         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
537
538         I915_STATE_WARN(state && port_pipe == pipe,
539                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
540                         port_name(port), pipe_name(pipe));
541
542         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
543                         "IBX PCH HDMI %c still using transcoder B\n",
544                         port_name(port));
545 }
546
/* Check that no PCH output port (DP/VGA/LVDS/HDMI) is still driving @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
571
/* Program the VLV DPLL and wait for it to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Write the DPLL value, then give the PLL time to spin up. */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
585
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the VCO up if the state actually wants the PLL enabled. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	/* DPLL_MD (e.g. pixel multiplier) is programmed unconditionally. */
	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
604
605
/* Program the CHV DPLL, enabling its 10bit clock first, and wait for lock. */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
635
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the VCO up if the state actually wants the PLL enabled. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* Cache the value so state readout can compare against it. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
676
677 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
678 {
679         if (IS_I830(dev_priv))
680                 return false;
681
682         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
683 }
684
/* Enable a gen2-gen4 DPLL, respecting the VGA-mode divider quirk. */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* Gen4+ has a separate register for the pixel multiplier. */
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
730
731 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
732 {
733         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
734         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
735         enum pipe pipe = crtc->pipe;
736
737         /* Don't disable pipe or pipe PLLs if needed */
738         if (IS_I830(dev_priv))
739                 return;
740
741         /* Make sure the pipe isn't still relying on us */
742         assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
743
744         intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
745         intel_de_posting_read(dev_priv, DPLL(pipe));
746 }
747
748 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
749 {
750         u32 val;
751
752         /* Make sure the pipe isn't still relying on us */
753         assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
754
755         val = DPLL_INTEGRATED_REF_CLK_VLV |
756                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
757         if (pipe != PIPE_A)
758                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
759
760         intel_de_write(dev_priv, DPLL(pipe), val);
761         intel_de_posting_read(dev_priv, DPLL(pipe));
762 }
763
/* Disable a CHV DPLL and shut off its 10bit clock via the DPIO sideband. */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	/* Keep the reference clock (and CRI clock for B/C) running. */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
789
/*
 * vlv_wait_port_ready - wait for a VLV/CHV digital port to become ready
 * @dev_priv: i915 device
 * @dig_port: digital port to wait for
 * @expected_mask: expected state of the port ready bits
 *
 * Polls the relevant status register (DPLL(0) for ports B/C, the DPIO
 * PHY status register for port D) until the port ready bits match
 * @expected_mask; port C's expected bits are shifted since it shares
 * DPLL(0) with port B. Warns on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
823
/*
 * Enable the PCH transcoder feeding @crtc_state's pipe (ILK/IBX/CPT).
 *
 * The shared DPLL and both FDI TX/RX must already be running. Copies
 * BPC and interlace settings from the CPU pipe's PIPECONF so the PCH
 * transcoder matches, then enables it and waits for the enable to latch.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the CPU pipe's interlace mode (IBX SDVO needs legacy mode) */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
890
/*
 * Enable the single LPT PCH transcoder (always fed from pipe A's FDI).
 *
 * Applies the timing override workaround and frame start delay, copies
 * the interlace mode from @cpu_transcoder's PIPECONF, and waits for the
 * transcoder enable to latch.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
922
/*
 * Disable the PCH transcoder for @pipe (ILK/IBX/CPT).
 *
 * FDI and the PCH ports must already be off. Waits for the transcoder
 * to report disabled; on CPT the timing override chicken bit set at
 * enable time is cleared again afterwards.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
953
/*
 * Disable the LPT PCH transcoder, wait for it to report disabled, and
 * clear the timing override workaround bit set when it was enabled.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
971
972 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
973 {
974         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
975
976         if (HAS_PCH_LPT(dev_priv))
977                 return PIPE_A;
978         else
979                 return crtc->pipe;
980 }
981
/*
 * intel_enable_pipe - enable the pipe for @new_crtc_state
 *
 * Asserts that the required clock source is running (DSI PLL or pipe
 * PLL on GMCH platforms, FDI PLLs when driving the PCH), then sets
 * PIPECONF_ENABLE. When no usable hw frame counter exists, waits for
 * the scanline to start moving so vblank timestamps are sane before
 * drm_crtc_vblank_on().
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1039
/*
 * intel_disable_pipe - disable the pipe for @old_crtc_state
 *
 * Planes must already be disabled. Also drops double wide mode if it
 * was in use. On i830 the pipe is left enabled (both pipes are kept
 * running there), so the pipe-off wait is skipped in that case.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1079
/* Size of one GTT tile in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1084
1085 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1086 {
1087         if (is_ccs_modifier(fb->modifier))
1088                 return is_ccs_plane(fb, plane);
1089
1090         return plane == 1;
1091 }
1092
1093 bool
1094 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1095                                     u64 modifier)
1096 {
1097         return info->is_yuv &&
1098                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1099 }
1100
1101 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
1102                                    int color_plane)
1103 {
1104         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
1105                color_plane == 1;
1106 }
1107
/*
 * Return the tile width in bytes for @color_plane of @fb.
 *
 * CCS planes get their own (smaller) widths via the early returns;
 * otherwise the width depends on the tiling modifier, the platform
 * generation, and for Yf tiling on the bytes per pixel. The switch
 * relies on fallthrough from the CCS cases into the base tiling cases.
 */
unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1161
1162 unsigned int
1163 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1164 {
1165         if (is_gen12_ccs_plane(fb, color_plane))
1166                 return 1;
1167
1168         return intel_tile_size(to_i915(fb->dev)) /
1169                 intel_tile_width_bytes(fb, color_plane);
1170 }
1171
1172 /* Return the tile dimensions in pixel units */
1173 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1174                             unsigned int *tile_width,
1175                             unsigned int *tile_height)
1176 {
1177         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1178         unsigned int cpp = fb->format->cpp[color_plane];
1179
1180         *tile_width = tile_width_bytes / cpp;
1181         *tile_height = intel_tile_height(fb, color_plane);
1182 }
1183
1184 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
1185                                         int color_plane)
1186 {
1187         unsigned int tile_width, tile_height;
1188
1189         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
1190
1191         return fb->pitches[color_plane] * tile_height;
1192 }
1193
/* Align @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1202
1203 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1204 {
1205         unsigned int size = 0;
1206         int i;
1207
1208         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1209                 size += rot_info->plane[i].width * rot_info->plane[i].height;
1210
1211         return size;
1212 }
1213
1214 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1215 {
1216         unsigned int size = 0;
1217         int i;
1218
1219         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1220                 size += rem_info->plane[i].width * rem_info->plane[i].height;
1221
1222         return size;
1223 }
1224
1225 static void
1226 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1227                         const struct drm_framebuffer *fb,
1228                         unsigned int rotation)
1229 {
1230         view->type = I915_GGTT_VIEW_NORMAL;
1231         if (drm_rotation_90_or_270(rotation)) {
1232                 view->type = I915_GGTT_VIEW_ROTATED;
1233                 view->rotated = to_intel_framebuffer(fb)->rot_info;
1234         }
1235 }
1236
/* Required GGTT alignment (bytes) for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
1248
/* Required GGTT alignment (bytes) for linear scanout surfaces. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
1261
1262 static bool has_async_flips(struct drm_i915_private *i915)
1263 {
1264         return INTEL_GEN(i915) >= 5;
1265 }
1266
/*
 * Return the required GGTT alignment (in bytes) for @color_plane of @fb.
 *
 * AUX/CCS planes need only 4K alignment (pre-gen12 AUX, and CCS planes
 * generally); otherwise the alignment depends on the tiling modifier,
 * with gen12 semiplanar UV planes aligned to a full tile row.
 */
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
1305
1306 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1307 {
1308         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1309         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1310
1311         return INTEL_GEN(dev_priv) < 4 ||
1312                 (plane->has_fbc &&
1313                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
1314 }
1315
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer for scanout
 * @fb: framebuffer to pin
 * @view: GGTT view to use (normal or rotated)
 * @uses_fence: whether to try installing a fence register
 * @out_flags: PLANE_HAS_FENCE is OR'ed in here if a fence was installed
 *
 * Pins @fb's backing object into the GGTT at the alignment required for
 * display, optionally installing a fence for tiled scanout. A runtime PM
 * wakeref is held around the pin/fence operations only. Returns a
 * referenced vma on success, or an ERR_PTR on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Alignment of the main surface must be a power of two (or zero) */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Pre-gen4 cannot scan out without a fence: bail */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Hand the caller a reference; released by intel_unpin_fb_vma() */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
1410
/*
 * Release a scanout pin taken by intel_pin_and_fence_fb_obj(): drop the
 * fence (if PLANE_HAS_FENCE was set), unpin from the display plane
 * under the object lock, and drop the vma reference taken at pin time.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj, NULL);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
1421
1422 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
1423                           unsigned int rotation)
1424 {
1425         if (drm_rotation_90_or_270(rotation))
1426                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
1427         else
1428                 return fb->pitches[color_plane];
1429 }
1430
1431 /*
1432  * Convert the x/y offsets into a linear offset.
1433  * Only valid with 0/180 degree rotation, which is fine since linear
1434  * offset is only used with linear buffers on pre-hsw and tiled buffers
1435  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1436  */
1437 u32 intel_fb_xy_to_linear(int x, int y,
1438                           const struct intel_plane_state *state,
1439                           int color_plane)
1440 {
1441         const struct drm_framebuffer *fb = state->hw.fb;
1442         unsigned int cpp = fb->format->cpp[color_plane];
1443         unsigned int pitch = state->color_plane[color_plane].stride;
1444
1445         return y * pitch + x * cpp;
1446 }
1447
1448 /*
1449  * Add the x/y offsets derived from fb->offsets[] to the user
1450  * specified plane src x/y offsets. The resulting x/y offsets
1451  * specify the start of scanout from the beginning of the gtt mapping.
1452  */
1453 void intel_add_fb_offsets(int *x, int *y,
1454                           const struct intel_plane_state *state,
1455                           int color_plane)
1456
1457 {
1458         *x += state->color_plane[color_plane].x;
1459         *y += state->color_plane[color_plane].y;
1460 }
1461
1462 static u32 intel_adjust_tile_offset(int *x, int *y,
1463                                     unsigned int tile_width,
1464                                     unsigned int tile_height,
1465                                     unsigned int tile_size,
1466                                     unsigned int pitch_tiles,
1467                                     u32 old_offset,
1468                                     u32 new_offset)
1469 {
1470         unsigned int pitch_pixels = pitch_tiles * tile_width;
1471         unsigned int tiles;
1472
1473         WARN_ON(old_offset & (tile_size - 1));
1474         WARN_ON(new_offset & (tile_size - 1));
1475         WARN_ON(new_offset > old_offset);
1476
1477         tiles = (old_offset - new_offset) / tile_size;
1478
1479         *y += tiles / pitch_tiles * tile_height;
1480         *x += tiles % pitch_tiles * tile_width;
1481
1482         /* minimize x in case it got needlessly big */
1483         *y += *x / pitch_pixels * tile_height;
1484         *x %= pitch_pixels;
1485
1486         return new_offset;
1487 }
1488
1489 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
1490 {
1491         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
1492                is_gen12_ccs_plane(fb, color_plane);
1493 }
1494
/*
 * Rewrite (@x, @y) to be relative to @new_offset instead of @old_offset.
 *
 * For tiled surfaces this works in whole tiles via
 * intel_adjust_tile_offset(); for linear surfaces it is plain
 * pitch/cpp arithmetic. @new_offset must not exceed @old_offset.
 * Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* For 90/270 the pitch is expressed in tile rows, not bytes */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
1533
1534 /*
1535  * Adjust the tile offset by moving the difference into
1536  * the x/y offsets.
1537  */
1538 u32 intel_plane_adjust_aligned_offset(int *x, int *y,
1539                                       const struct intel_plane_state *state,
1540                                       int color_plane,
1541                                       u32 old_offset, u32 new_offset)
1542 {
1543         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
1544                                            state->hw.rotation,
1545                                            state->color_plane[color_plane].stride,
1546                                            old_offset, new_offset);
1547 }
1548
1549 /*
1550  * Computes the aligned offset to the base tile and adjusts
1551  * x, y. bytes per pixel is assumed to be a power-of-two.
1552  *
1553  * In the 90/270 rotated case, x and y are assumed
1554  * to be already rotated to match the rotated GTT view, and
1555  * pitch is the tile_height aligned framebuffer height.
1556  *
1557  * This function is used when computing the derived information
1558  * under intel_framebuffer, so using any of that information
1559  * here is not allowed. Anything under drm_framebuffer can be
1560  * used. This is why the user has to pass in the pitch since it
1561  * is specified in the rotated orientation.
1562  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* pitch is the tile_height aligned fb height here (see above) */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into a whole-tile part and an intra-tile remainder */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* byte offset of the tile containing the original (x,y) */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* push the offset vs. aligned-offset difference back into x/y */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		/* linear: plain byte offset, remainder folded into x/y */
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
1617
1618 u32 intel_plane_compute_aligned_offset(int *x, int *y,
1619                                        const struct intel_plane_state *state,
1620                                        int color_plane)
1621 {
1622         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
1623         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
1624         const struct drm_framebuffer *fb = state->hw.fb;
1625         unsigned int rotation = state->hw.rotation;
1626         int pitch = state->color_plane[color_plane].stride;
1627         u32 alignment;
1628
1629         if (intel_plane->id == PLANE_CURSOR)
1630                 alignment = intel_cursor_alignment(dev_priv);
1631         else
1632                 alignment = intel_surf_alignment(fb, color_plane);
1633
1634         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
1635                                             pitch, rotation, alignment);
1636 }
1637
1638 /* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/*
	 * Required alignment of the plane's byte offset: gen12+
	 * semiplanar UV planes must be tile-row aligned, other tiled
	 * surfaces tile aligned, linear surfaces unconstrained.
	 */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold the byte offset into the (initially zero) x/y coordinates. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
1685
1686 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1687 {
1688         switch (fb_modifier) {
1689         case I915_FORMAT_MOD_X_TILED:
1690                 return I915_TILING_X;
1691         case I915_FORMAT_MOD_Y_TILED:
1692         case I915_FORMAT_MOD_Y_TILED_CCS:
1693         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1694         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1695         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1696                 return I915_TILING_Y;
1697         default:
1698                 return I915_TILING_NONE;
1699         }
1700 }
1701
1702 /*
1703  * From the Sky Lake PRM:
1704  * "The Color Control Surface (CCS) contains the compression status of
1705  *  the cache-line pairs. The compression state of the cache-line pair
1706  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1707  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1708  *  cache-line-pairs. CCS is always Y tiled."
1709  *
1710  * Since cache line pairs refers to horizontally adjacent cache lines,
1711  * each cache line in the CCS corresponds to an area of 32x16 cache
1712  * lines on the main surface. Since each pixel is 4 bytes, this gives
1713  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1714  * main surface.
1715  */
/* Plane 1 is the CCS AUX surface: 1 byte per 8x16 main surface pixels (see above). */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
1726
1727 /*
1728  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1729  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1730  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1731  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1732  * the main surface.
1733  */
static const struct drm_format_info gen12_ccs_formats[] = {
	/* Single main plane in plane 0, CCS AUX surface in plane 1. */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	/*
	 * Semiplanar formats: Y/UV main planes in planes 0/1, their
	 * CCS AUX surfaces in planes 2/3.
	 */
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
1772
1773 /*
1774  * Same as gen12_ccs_formats[] above, but with additional surface used
1775  * to pass Clear Color information in plane 2 with 64 bits of data.
1776  */
/* Plane 2 carries the 64-bit Clear Color data, hence .char_per_block = 0. */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
1791
1792 static const struct drm_format_info *
1793 lookup_format_info(const struct drm_format_info formats[],
1794                    int num_formats, u32 format)
1795 {
1796         int i;
1797
1798         for (i = 0; i < num_formats; i++) {
1799                 if (formats[i].format == format)
1800                         return &formats[i];
1801         }
1802
1803         return NULL;
1804 }
1805
1806 static const struct drm_format_info *
1807 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1808 {
1809         switch (cmd->modifier[0]) {
1810         case I915_FORMAT_MOD_Y_TILED_CCS:
1811         case I915_FORMAT_MOD_Yf_TILED_CCS:
1812                 return lookup_format_info(skl_ccs_formats,
1813                                           ARRAY_SIZE(skl_ccs_formats),
1814                                           cmd->pixel_format);
1815         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1816         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1817                 return lookup_format_info(gen12_ccs_formats,
1818                                           ARRAY_SIZE(gen12_ccs_formats),
1819                                           cmd->pixel_format);
1820         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1821                 return lookup_format_info(gen12_ccs_cc_formats,
1822                                           ARRAY_SIZE(gen12_ccs_cc_formats),
1823                                           cmd->pixel_format);
1824         default:
1825                 return NULL;
1826         }
1827 }
1828
1829 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1830 {
1831         return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1832                             512) * 64;
1833 }
1834
1835 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1836                               u32 pixel_format, u64 modifier)
1837 {
1838         struct intel_crtc *crtc;
1839         struct intel_plane *plane;
1840
1841         /*
1842          * We assume the primary plane for pipe A has
1843          * the highest stride limits of them all,
1844          * if in case pipe A is disabled, use the first pipe from pipe_mask.
1845          */
1846         crtc = intel_get_first_crtc(dev_priv);
1847         if (!crtc)
1848                 return 0;
1849
1850         plane = to_intel_plane(crtc->base.primary);
1851
1852         return plane->max_stride(plane, pixel_format, modifier,
1853                                  DRM_MODE_ROTATE_0);
1854 }
1855
1856 static
1857 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1858                         u32 pixel_format, u64 modifier)
1859 {
1860         /*
1861          * Arbitrary limit for gen4+ chosen to match the
1862          * render engine max stride.
1863          *
1864          * The new CCS hash mode makes remapping impossible
1865          */
1866         if (!is_ccs_modifier(modifier)) {
1867                 if (INTEL_GEN(dev_priv) >= 7)
1868                         return 256*1024;
1869                 else if (INTEL_GEN(dev_priv) >= 4)
1870                         return 128*1024;
1871         }
1872
1873         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1874 }
1875
/* Required stride alignment (in bytes) for the given fb color plane. */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
1920
1921 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
1922 {
1923         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1924         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1925         const struct drm_framebuffer *fb = plane_state->hw.fb;
1926         int i;
1927
1928         /* We don't want to deal with remapping with cursors */
1929         if (plane->id == PLANE_CURSOR)
1930                 return false;
1931
1932         /*
1933          * The display engine limits already match/exceed the
1934          * render engine limits, so not much point in remapping.
1935          * Would also need to deal with the fence POT alignment
1936          * and gen2 2KiB GTT tile size.
1937          */
1938         if (INTEL_GEN(dev_priv) < 4)
1939                 return false;
1940
1941         /*
1942          * The new CCS hash mode isn't compatible with remapping as
1943          * the virtual address of the pages affects the compressed data.
1944          */
1945         if (is_ccs_modifier(fb->modifier))
1946                 return false;
1947
1948         /* Linear needs a page aligned stride for remapping */
1949         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
1950                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
1951
1952                 for (i = 0; i < fb->format->num_planes; i++) {
1953                         if (fb->pitches[i] & alignment)
1954                                 return false;
1955                 }
1956         }
1957
1958         return true;
1959 }
1960
1961 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
1962 {
1963         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1964         const struct drm_framebuffer *fb = plane_state->hw.fb;
1965         unsigned int rotation = plane_state->hw.rotation;
1966         u32 stride, max_stride;
1967
1968         /*
1969          * No remapping for invisible planes since we don't have
1970          * an actual source viewport to remap.
1971          */
1972         if (!plane_state->uapi.visible)
1973                 return false;
1974
1975         if (!intel_plane_can_remap(plane_state))
1976                 return false;
1977
1978         /*
1979          * FIXME: aux plane limits on gen9+ are
1980          * unclear in Bspec, for now no checking.
1981          */
1982         stride = intel_fb_pitch(fb, 0, rotation);
1983         max_stride = plane->max_stride(plane, fb->format->format,
1984                                        fb->modifier, rotation);
1985
1986         return stride > max_stride;
1987 }
1988
/*
 * Return the horizontal/vertical subsampling factors of @color_plane
 * relative to the fb plane it is subordinate to (the main surface for
 * CCS AUX planes, plane 0 otherwise).
 */
void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	/* Plane 0 is by definition not subsampled. */
	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	/* gen12 CCS plane: derive hsub from the block width ratio. */
	main_plane = skl_ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* One CCS byte covers 32 main surface lines on gen12. */
	*vsub = 32;
}
/*
 * Validate that the intra-tile x/y offsets of a CCS AUX plane match
 * those of its main surface; returns 0 on success, -EINVAL otherwise.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	/* Only real CCS AUX planes need this check (not the clear color plane). */
	if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the CCS tile dimensions to main surface pixels. */
	tile_width *= hsub;
	tile_height *= vsub;

	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		drm_dbg_kms(&i915->drm,
			      "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
2075
2076 static void
2077 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2078 {
2079         int main_plane = is_ccs_plane(fb, color_plane) ?
2080                          skl_ccs_to_main_plane(fb, color_plane) : 0;
2081         int main_hsub, main_vsub;
2082         int hsub, vsub;
2083
2084         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2085         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2086         *w = fb->width / main_hsub / hsub;
2087         *h = fb->height / main_vsub / vsub;
2088 }
2089
2090 /*
2091  * Setup the rotated view for an FB plane and return the size the GTT mapping
2092  * requires for this view.
2093  */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	/* rotated pitch: fb plane height in tiles times the tile height */
	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* size of this plane's rotated view, in tiles */
	return plane_info->width * plane_info->height;
}
2150
/*
 * Validate the fb layout and fill in the derived per-plane information
 * (normal/rotated x/y offsets, rotation info) under intel_framebuffer.
 * Returns 0 on success or a negative error code on a bad layout.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		/*
		 * Plane 2 of Render Compression with Clear Color fb modifier
		 * is consumed by the driver and not passed to DE. Skip the
		 * arithmetic related to alignment and offset calculation.
		 */
		if (is_gen12_ccs_cc_plane(fb, i)) {
			if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
				continue;
			else
				return -EINVAL;
		}

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the plane's byte offset into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		/* CCS AUX x/y must agree with the main surface. */
		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				     i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* offset becomes the tile-aligned base, in tile units */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* linear plane: size in tiles, rounded up */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* The backing object must be large enough for every plane. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2273
/*
 * Build a rotated/remapped GGTT view for the plane and recompute the
 * per-color-plane x/y/stride so the plane reads the same pixels through
 * the new mapping.  The view type is ROTATED for 90/270 rotation and
 * REMAPPED otherwise; CCS modifiers are not supported here (WARN below).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *info = &plane_state->view.rotated;
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);
        unsigned int src_x, src_y;
        unsigned int src_w, src_h;
        u32 gtt_offset = 0; /* running offset into the remapped view, in tiles */

        memset(&plane_state->view, 0, sizeof(plane_state->view));
        plane_state->view.type = drm_rotation_90_or_270(rotation) ?
                I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

        /* src coordinates are 16.16 fixed point */
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;
        src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
        src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

        drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

        /* Make src coordinates relative to the viewport */
        drm_rect_translate(&plane_state->uapi.src,
                           -(src_x << 16), -(src_y << 16));

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                src_w << 16, src_h << 16,
                                DRM_MODE_ROTATE_270);

        for (i = 0; i < num_planes; i++) {
                /* chroma planes (i != 0) are subsampled by hsub/vsub */
                unsigned int hsub = i ? fb->format->hsub : 1;
                unsigned int vsub = i ? fb->format->vsub : 1;
                unsigned int cpp = fb->format->cpp[i];
                unsigned int tile_width, tile_height;
                unsigned int width, height;
                unsigned int pitch_tiles;
                unsigned int x, y;
                u32 offset;

                intel_tile_dims(fb, i, &tile_width, &tile_height);

                x = src_x / hsub;
                y = src_y / vsub;
                width = src_w / hsub;
                height = src_h / vsub;

                /*
                 * First pixel of the src viewport from the
                 * start of the normal gtt mapping.
                 */
                x += intel_fb->normal[i].x;
                y += intel_fb->normal[i].y;

                offset = intel_compute_aligned_offset(dev_priv, &x, &y,
                                                      fb, i, fb->pitches[i],
                                                      DRM_MODE_ROTATE_0, tile_size);
                offset /= tile_size; /* convert bytes -> tiles */

                drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
                info->plane[i].offset = offset;
                info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
                                                     tile_width * cpp);
                info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        struct drm_rect r;

                        /* rotate the x/y offsets to match the GTT view */
                        drm_rect_init(&r, x, y, width, height);
                        drm_rect_rotate(&r,
                                        info->plane[i].width * tile_width,
                                        info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotated view: pitch runs along the tile columns */
                        pitch_tiles = info->plane[i].height;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_height;

                        /* rotate the tile dimensions to match the GTT view */
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = info->plane[i].width;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
                }

                /*
                 * We only keep the x/y offsets, so push all of the
                 * gtt offset into the x/y offsets.
                 */
                intel_adjust_tile_offset(&x, &y,
                                         tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         gtt_offset * tile_size, 0);

                gtt_offset += info->plane[i].width * info->plane[i].height;

                plane_state->color_plane[i].offset = 0;
                plane_state->color_plane[i].x = x;
                plane_state->color_plane[i].y = y;
        }
}
2384
/*
 * Compute the GGTT view and the per-color-plane x/y/stride for a plane.
 *
 * If the plane needs remapping, build the remapped/rotated view via
 * intel_plane_remap_gtt(); otherwise use the fb's precomputed normal or
 * rotated coordinates.  Returns 0 on success or the negative error code
 * from intel_plane_check_stride() if the stride limits can't be met.
 */
int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
        const struct intel_framebuffer *fb =
                to_intel_framebuffer(plane_state->hw.fb);
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes;

        /* no fb -> nothing to map */
        if (!fb)
                return 0;

        num_planes = fb->base.format->num_planes;

        if (intel_plane_needs_remap(plane_state)) {
                intel_plane_remap_gtt(plane_state);

                /*
                 * Sometimes even remapping can't overcome
                 * the stride limitations :( Can happen with
                 * big plane sizes and suitably misaligned
                 * offsets.
                 */
                return intel_plane_check_stride(plane_state);
        }

        intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

        for (i = 0; i < num_planes; i++) {
                plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
                plane_state->color_plane[i].offset = 0;

                /* pick the precomputed coordinates matching the view */
                if (drm_rotation_90_or_270(rotation)) {
                        plane_state->color_plane[i].x = fb->rotated[i].x;
                        plane_state->color_plane[i].y = fb->rotated[i].y;
                } else {
                        plane_state->color_plane[i].x = fb->normal[i].x;
                        plane_state->color_plane[i].y = fb->normal[i].y;
                }
        }

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                fb->base.width << 16, fb->base.height << 16,
                                DRM_MODE_ROTATE_270);

        return intel_plane_check_stride(plane_state);
}
2433
2434 static int i9xx_format_to_fourcc(int format)
2435 {
2436         switch (format) {
2437         case DISPPLANE_8BPP:
2438                 return DRM_FORMAT_C8;
2439         case DISPPLANE_BGRA555:
2440                 return DRM_FORMAT_ARGB1555;
2441         case DISPPLANE_BGRX555:
2442                 return DRM_FORMAT_XRGB1555;
2443         case DISPPLANE_BGRX565:
2444                 return DRM_FORMAT_RGB565;
2445         default:
2446         case DISPPLANE_BGRX888:
2447                 return DRM_FORMAT_XRGB8888;
2448         case DISPPLANE_RGBX888:
2449                 return DRM_FORMAT_XBGR8888;
2450         case DISPPLANE_BGRA888:
2451                 return DRM_FORMAT_ARGB8888;
2452         case DISPPLANE_RGBA888:
2453                 return DRM_FORMAT_ABGR8888;
2454         case DISPPLANE_BGRX101010:
2455                 return DRM_FORMAT_XRGB2101010;
2456         case DISPPLANE_RGBX101010:
2457                 return DRM_FORMAT_XBGR2101010;
2458         case DISPPLANE_BGRA101010:
2459                 return DRM_FORMAT_ARGB2101010;
2460         case DISPPLANE_RGBA101010:
2461                 return DRM_FORMAT_ABGR2101010;
2462         case DISPPLANE_RGBX161616:
2463                 return DRM_FORMAT_XBGR16161616F;
2464         }
2465 }
2466
/*
 * Reconstruct a pinned GGTT vma for the firmware/BIOS-programmed
 * framebuffer, backed by the preallocated range of stolen memory that
 * the BIOS already scanned out from.  Returns NULL on any failure
 * (callers then fall back to other strategies).
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
                  struct intel_initial_plane_config *plane_config)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 base, size;

        if (plane_config->size == 0)
                return NULL;

        /* Align the stolen range to the minimum GTT alignment. */
        base = round_down(plane_config->base,
                          I915_GTT_MIN_ALIGNMENT);
        size = round_up(plane_config->base + plane_config->size,
                        I915_GTT_MIN_ALIGNMENT);
        size -= base;

        /*
         * If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features.
         */
        if (size * 2 > i915->stolen_usable_size)
                return NULL;

        obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
        if (IS_ERR(obj))
                return NULL;

        /*
         * Mark it WT ahead of time to avoid changing the
         * cache_level during fbdev initialization. The
         * unbind there would get stuck waiting for rcu.
         */
        i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
                                            I915_CACHE_WT : I915_CACHE_NONE);

        /* Carry the BIOS tiling over to the object. */
        switch (plane_config->tiling) {
        case I915_TILING_NONE:
                break;
        case I915_TILING_X:
        case I915_TILING_Y:
                obj->tiling_and_stride =
                        plane_config->fb->base.pitches[0] |
                        plane_config->tiling;
                break;
        default:
                MISSING_CASE(plane_config->tiling);
                goto err_obj;
        }

        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err_obj;

        /* Must land at the exact GGTT offset the BIOS used. */
        if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
                goto err_obj;

        /* A tiled BIOS fb is only usable if it can get a fence. */
        if (i915_gem_object_is_tiled(obj) &&
            !i915_vma_is_map_and_fenceable(vma))
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return NULL;
}
2535
/*
 * Wrap the BIOS-programmed framebuffer into a proper intel_framebuffer
 * backed by a pinned stolen-memory vma.  Returns true and stores the vma
 * in plane_config on success, false if the modifier is unsupported or
 * the vma/fb could not be created.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        struct i915_vma *vma;

        /* Only plain linear/X/Y tiling can be taken over from the BIOS. */
        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
        case I915_FORMAT_MOD_Y_TILED:
                break;
        default:
                drm_dbg(&dev_priv->drm,
                        "Unsupported modifier for initial FB: 0x%llx\n",
                        fb->modifier);
                return false;
        }

        vma = initial_plane_vma(dev_priv, plane_config);
        if (!vma)
                return false;

        /* Rebuild the mode_cmd from the BIOS fb parameters. */
        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb),
                                   vma->obj, &mode_cmd)) {
                drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
                goto err_vma;
        }

        plane_config->vma = vma;
        return true;

err_vma:
        i915_vma_put(vma);
        return false;
}
2582
2583 static void
2584 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2585                         struct intel_plane_state *plane_state,
2586                         bool visible)
2587 {
2588         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2589
2590         plane_state->uapi.visible = visible;
2591
2592         if (visible)
2593                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
2594         else
2595                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
2596 }
2597
2598 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
2599 {
2600         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2601         struct drm_plane *plane;
2602
2603         /*
2604          * Active_planes aliases if multiple "primary" or cursor planes
2605          * have been used on the same (or wrong) pipe. plane_mask uses
2606          * unique ids, hence we can use that to reconstruct active_planes.
2607          */
2608         crtc_state->enabled_planes = 0;
2609         crtc_state->active_planes = 0;
2610
2611         drm_for_each_plane_mask(plane, &dev_priv->drm,
2612                                 crtc_state->uapi.plane_mask) {
2613                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
2614                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2615         }
2616 }
2617
/*
 * Disable a plane outside of the atomic commit machinery (used during
 * initial hardware takeover/sanitization).  Updates the software state
 * to match, and works around self-refresh and underrun-reporting quirks
 * that a normal atomic commit would handle.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        drm_dbg_kms(&dev_priv->drm,
                    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                    plane->base.base.id, plane->base.name,
                    crtc->base.base.id, crtc->base.name);

        /* Keep the software bookkeeping consistent with the disable. */
        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_plane_bitmasks(crtc_state);
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;

        if (plane->id == PLANE_PRIMARY)
                hsw_disable_ips(crtc_state);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

        intel_disable_plane(plane, crtc_state);
}
2662
/*
 * Take over the framebuffer the BIOS left on the primary plane.  Try to
 * allocate a backing object for it; failing that, try to share an fb
 * already reconstructed for another CRTC at the same GGTT offset; as a
 * last resort, disable the plane so software state doesn't claim a
 * visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(intel_crtc->base.state);
        struct drm_framebuffer *fb;
        struct i915_vma *vma;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                vma = plane_config->vma;
                goto valid_fb;
        }

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc_state(c->state)->uapi.active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* same GGTT base -> same BIOS fb, share it */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->hw.fb;
                        vma = state->vma;
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);
        if (crtc_state->bigjoiner) {
                /* the linked slave pipe must drop its plane too */
                struct intel_crtc *slave =
                        crtc_state->bigjoiner_linked_crtc;
                intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
        }

        return;

valid_fb:
        intel_state->hw.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->hw.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->hw.rotation);

        __i915_vma_pin(vma);
        intel_state->vma = i915_vma_get(vma);
        if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
                if (vma->fence)
                        intel_state->flags |= PLANE_HAS_FENCE;

        /* full-fb src/dst rectangle, src in 16.16 fixed point */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->uapi.src = drm_plane_state_src(plane_state);
        intel_state->uapi.dst = drm_plane_state_dest(plane_state);

        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        drm_framebuffer_get(fb);

        plane_state->crtc = &intel_crtc->base;
        intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
                                          intel_crtc);

        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &to_intel_frontbuffer(fb)->bits);
}
2770
2771 unsigned int
2772 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
2773 {
2774         int x = 0, y = 0;
2775
2776         intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
2777                                           plane_state->color_plane[0].offset, 0);
2778
2779         return y;
2780 }
2781
2782 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2783 {
2784         struct drm_device *dev = intel_crtc->base.dev;
2785         struct drm_i915_private *dev_priv = to_i915(dev);
2786         unsigned long irqflags;
2787
2788         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
2789
2790         intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2791         intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2792         intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2793
2794         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
2795 }
2796
2797 /*
2798  * This function detaches (aka. unbinds) unused scalers in hardware
2799  */
2800 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
2801 {
2802         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
2803         const struct intel_crtc_scaler_state *scaler_state =
2804                 &crtc_state->scaler_state;
2805         int i;
2806
2807         /* loop through and disable scalers that aren't in use */
2808         for (i = 0; i < intel_crtc->num_scalers; i++) {
2809                 if (!scaler_state->scalers[i].in_use)
2810                         skl_detach_scaler(intel_crtc, i);
2811         }
2812 }
2813
/*
 * Re-read the hardware state and (optionally) restore a previously
 * duplicated atomic state, e.g. after a GPU reset that clobbered the
 * display.  Returns 0 on success or the commit error (never -EDEADLK:
 * see the WARN below).
 */
static int
__intel_display_resume(struct drm_device *dev,
                       struct drm_atomic_state *state,
                       struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, ret;

        intel_modeset_setup_hw_state(dev, ctx);
        intel_vga_redisable(to_i915(dev));

        /* nothing to restore -> hw readout alone is enough */
        if (!state)
                return 0;

        /*
         * We've duplicated the state, pointers to the old state are invalid.
         *
         * Don't attempt to use the old state until we commit the duplicated state.
         */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                /*
                 * Force recalculation even if we restore
                 * current state. With fast modeset this may not result
                 * in a modeset when the state is compatible.
                 */
                crtc_state->mode_changed = true;
        }

        /* ignore any reset values/BIOS leftovers in the WM registers */
        if (!HAS_GMCH(to_i915(dev)))
                to_intel_atomic_state(state)->skip_intermediate_wm = true;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

        /* all locks were taken up front, so -EDEADLK should be impossible */
        drm_WARN_ON(dev, ret == -EDEADLK);
        return ret;
}
2852
2853 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
2854 {
2855         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
2856                 intel_has_gpu_reset(&dev_priv->gt));
2857 }
2858
/*
 * Prepare the display for a GPU reset that will clobber it: break any
 * modeset-vs-reset deadlock, take all modeset locks, duplicate the
 * current atomic state for later restore, and disable all CRTCs.
 * Paired with intel_display_finish_reset().
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
        struct drm_atomic_state *state;
        int ret;

        if (!HAS_DISPLAY(dev_priv))
                return;

        /* reset doesn't touch the display */
        if (!dev_priv->params.force_reset_modeset_test &&
            !gpu_reset_clobbers_display(dev_priv))
                return;

        /* We have a modeset vs reset deadlock, defensively unbreak it. */
        set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
        smp_mb__after_atomic();
        wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

        if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Modeset potentially stuck, unbreaking through wedging\n");
                intel_gt_set_wedged(&dev_priv->gt);
        }

        /*
         * Need mode_config.mutex so that we don't
         * trample ongoing ->detect() and whatnot.
         */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(ctx, 0);
        /* retry the lock acquisition until no deadlock backoff is needed */
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, ctx);
                if (ret != -EDEADLK)
                        break;

                drm_modeset_backoff(ctx);
        }
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
        state = drm_atomic_helper_duplicate_state(dev, ctx);
        if (IS_ERR(state)) {
                ret = PTR_ERR(state);
                drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
                        ret);
                return;
        }

        ret = drm_atomic_helper_disable_all(dev, ctx);
        if (ret) {
                drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
                        ret);
                drm_atomic_state_put(state);
                return;
        }

        /* stashed for intel_display_finish_reset() to restore */
        dev_priv->modeset_restore_state = state;
        state->acquire_ctx = ctx;
}
2921
/*
 * Restore the display after a GPU reset: commit the state duplicated by
 * intel_display_prepare_reset(), re-initializing the display hardware
 * first if the reset actually clobbered it, then drop the modeset locks
 * and clear the reset-vs-modeset flag.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
        struct drm_atomic_state *state;
        int ret;

        if (!HAS_DISPLAY(dev_priv))
                return;

        /* reset doesn't touch the display */
        if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                return;

        state = fetch_and_zero(&dev_priv->modeset_restore_state);
        if (!state)
                goto unlock;

        /* reset doesn't touch the display */
        if (!gpu_reset_clobbers_display(dev_priv)) {
                /* for testing only restore the display */
                ret = __intel_display_resume(dev, state, ctx);
                if (ret)
                        drm_err(&dev_priv->drm,
                                "Restoring old state failed with %i\n", ret);
        } else {
                /*
                 * The display has been reset as well,
                 * so need a full re-initialization.
                 */
                intel_pps_unlock_regs_wa(dev_priv);
                intel_modeset_init_hw(dev_priv);
                intel_init_clock_gating(dev_priv);
                intel_hpd_init(dev_priv);

                ret = __intel_display_resume(dev, state, ctx);
                if (ret)
                        drm_err(&dev_priv->drm,
                                "Restoring old state failed with %i\n", ret);

                intel_hpd_poll_disable(dev_priv);
        }

        drm_atomic_state_put(state);
unlock:
        drm_modeset_drop_locks(ctx);
        drm_modeset_acquire_fini(ctx);
        mutex_unlock(&dev->mode_config.mutex);

        clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
2973
2974 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
2975 {
2976         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2977         enum pipe pipe = crtc->pipe;
2978         u32 tmp;
2979
2980         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2981
2982         /*
2983          * Display WA #1153: icl
2984          * enable hardware to bypass the alpha math
2985          * and rounding for per-pixel values 00 and 0xff
2986          */
2987         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2988         /*
2989          * Display WA # 1605353570: icl
2990          * Set the pixel rounding bit to 1 for allowing
2991          * passthrough of Frame buffer pixels unmodified
2992          * across pipe
2993          */
2994         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2995         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2996 }
2997
2998 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2999 {
3000         struct drm_crtc *crtc;
3001         bool cleanup_done;
3002
3003         drm_for_each_crtc(crtc, &dev_priv->drm) {
3004                 struct drm_crtc_commit *commit;
3005                 spin_lock(&crtc->commit_lock);
3006                 commit = list_first_entry_or_null(&crtc->commit_list,
3007                                                   struct drm_crtc_commit, commit_entry);
3008                 cleanup_done = commit ?
3009                         try_wait_for_completion(&commit->cleanup_done) : true;
3010                 spin_unlock(&crtc->commit_lock);
3011
3012                 if (cleanup_done)
3013                         continue;
3014
3015                 drm_crtc_wait_one_vblank(crtc);
3016
3017                 return true;
3018         }
3019
3020         return false;
3021 }
3022
3023 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3024 {
3025         u32 temp;
3026
3027         intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
3028
3029         mutex_lock(&dev_priv->sb_lock);
3030
3031         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3032         temp |= SBI_SSCCTL_DISABLE;
3033         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3034
3035         mutex_unlock(&dev_priv->sb_lock);
3036 }
3037
3038 /* Program iCLKIP clock to the desired frequency */
3039 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
3040 {
3041         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3042         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3043         int clock = crtc_state->hw.adjusted_mode.crtc_clock;
3044         u32 divsel, phaseinc, auxdiv, phasedir = 0;
3045         u32 temp;
3046
3047         lpt_disable_iclkip(dev_priv);
3048
3049         /* The iCLK virtual clock root frequency is in MHz,
 * but the adjusted_mode->crtc_clock is in KHz. To get the
3051          * divisors, it is necessary to divide one by another, so we
3052          * convert the virtual clock precision to KHz here for higher
3053          * precision.
3054          */
3055         for (auxdiv = 0; auxdiv < 2; auxdiv++) {
3056                 u32 iclk_virtual_root_freq = 172800 * 1000;
3057                 u32 iclk_pi_range = 64;
3058                 u32 desired_divisor;
3059
3060                 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3061                                                     clock << auxdiv);
3062                 divsel = (desired_divisor / iclk_pi_range) - 2;
3063                 phaseinc = desired_divisor % iclk_pi_range;
3064
3065                 /*
3066                  * Near 20MHz is a corner case which is
3067                  * out of range for the 7-bit divisor
3068                  */
3069                 if (divsel <= 0x7f)
3070                         break;
3071         }
3072
3073         /* This should not happen with any sane values */
3074         drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3075                     ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3076         drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
3077                     ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3078
3079         drm_dbg_kms(&dev_priv->drm,
3080                     "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3081                     clock, auxdiv, divsel, phasedir, phaseinc);
3082
3083         mutex_lock(&dev_priv->sb_lock);
3084
3085         /* Program SSCDIVINTPHASE6 */
3086         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3087         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3088         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3089         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3090         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3091         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3092         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3093         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3094
3095         /* Program SSCAUXDIV */
3096         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3097         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3098         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3099         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3100
3101         /* Enable modulator and associated divider */
3102         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3103         temp &= ~SBI_SSCCTL_DISABLE;
3104         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3105
3106         mutex_unlock(&dev_priv->sb_lock);
3107
3108         /* Wait for initialization time */
3109         udelay(24);
3110
3111         intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3112 }
3113
3114 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
3115 {
3116         u32 divsel, phaseinc, auxdiv;
3117         u32 iclk_virtual_root_freq = 172800 * 1000;
3118         u32 iclk_pi_range = 64;
3119         u32 desired_divisor;
3120         u32 temp;
3121
3122         if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
3123                 return 0;
3124
3125         mutex_lock(&dev_priv->sb_lock);
3126
3127         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3128         if (temp & SBI_SSCCTL_DISABLE) {
3129                 mutex_unlock(&dev_priv->sb_lock);
3130                 return 0;
3131         }
3132
3133         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3134         divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
3135                 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
3136         phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
3137                 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
3138
3139         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3140         auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
3141                 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
3142
3143         mutex_unlock(&dev_priv->sb_lock);
3144
3145         desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
3146
3147         return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3148                                  desired_divisor << auxdiv);
3149 }
3150
/*
 * Copy the CPU transcoder's h/v timing registers into the PCH
 * transcoder, so both ends of the FDI link run with identical
 * timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                           enum pipe pch_transcoder)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Horizontal timings */
        intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
                       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
                       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
                       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

        /* Vertical timings */
        intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
                       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
                       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
                       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
        intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
                       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
3174
3175 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
3176 {
3177         u32 temp;
3178
3179         temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
3180         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
3181                 return;
3182
3183         drm_WARN_ON(&dev_priv->drm,
3184                     intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
3185                     FDI_RX_ENABLE);
3186         drm_WARN_ON(&dev_priv->drm,
3187                     intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
3188                     FDI_RX_ENABLE);
3189
3190         temp &= ~FDI_BC_BIFURCATION_SELECT;
3191         if (enable)
3192                 temp |= FDI_BC_BIFURCATION_SELECT;
3193
3194         drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
3195                     enable ? "en" : "dis");
3196         intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
3197         intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
3198 }
3199
3200 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
3201 {
3202         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3203         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3204
3205         switch (crtc->pipe) {
3206         case PIPE_A:
3207                 break;
3208         case PIPE_B:
3209                 if (crtc_state->fdi_lanes > 2)
3210                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
3211                 else
3212                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
3213
3214                 break;
3215         case PIPE_C:
3216                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
3217
3218                 break;
3219         default:
3220                 BUG();
3221         }
3222 }
3223
3224 /*
3225  * Finds the encoder associated with the given CRTC. This can only be
3226  * used when we know that the CRTC isn't feeding multiple encoders!
3227  */
3228 struct intel_encoder *
3229 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
3230                            const struct intel_crtc_state *crtc_state)
3231 {
3232         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3233         const struct drm_connector_state *connector_state;
3234         const struct drm_connector *connector;
3235         struct intel_encoder *encoder = NULL;
3236         int num_encoders = 0;
3237         int i;
3238
3239         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
3240                 if (connector_state->crtc != &crtc->base)
3241                         continue;
3242
3243                 encoder = to_intel_encoder(connector_state->best_encoder);
3244                 num_encoders++;
3245         }
3246
3247         drm_WARN(encoder->base.dev, num_encoders != 1,
3248                  "%d encoders for pipe %c\n",
3249                  num_encoders, pipe_name(crtc->pipe));
3250
3251         return encoder;
3252 }
3253
3254 /*
3255  * Enable PCH resources required for PCH ports:
3256  *   - PCH PLLs
3257  *   - FDI training & RX/TX
3258  *   - update transcoder timings
3259  *   - DP transcoding bits
3260  *   - transcoder
3261  */
3262 static void ilk_pch_enable(const struct intel_atomic_state *state,
3263                            const struct intel_crtc_state *crtc_state)
3264 {
3265         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3266         struct drm_device *dev = crtc->base.dev;
3267         struct drm_i915_private *dev_priv = to_i915(dev);
3268         enum pipe pipe = crtc->pipe;
3269         u32 temp;
3270
3271         assert_pch_transcoder_disabled(dev_priv, pipe);
3272
3273         if (IS_IVYBRIDGE(dev_priv))
3274                 ivb_update_fdi_bc_bifurcation(crtc_state);
3275
3276         /* Write the TU size bits before fdi link training, so that error
3277          * detection works. */
3278         intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
3279                        intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3280
3281         /* For PCH output, training FDI link */
3282         dev_priv->display.fdi_link_train(crtc, crtc_state);
3283
3284         /* We need to program the right clock selection before writing the pixel
3285          * mutliplier into the DPLL. */
3286         if (HAS_PCH_CPT(dev_priv)) {
3287                 u32 sel;
3288
3289                 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3290                 temp |= TRANS_DPLL_ENABLE(pipe);
3291                 sel = TRANS_DPLLB_SEL(pipe);
3292                 if (crtc_state->shared_dpll ==
3293                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
3294                         temp |= sel;
3295                 else
3296                         temp &= ~sel;
3297                 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3298         }
3299
3300         /* XXX: pch pll's can be enabled any time before we enable the PCH
3301          * transcoder, and we actually should do this to not upset any PCH
3302          * transcoder that already use the clock when we share it.
3303          *
3304          * Note that enable_shared_dpll tries to do the right thing, but
3305          * get_shared_dpll unconditionally resets the pll - we need that to have
3306          * the right LVDS enable sequence. */
3307         intel_enable_shared_dpll(crtc_state);
3308
3309         /* set transcoder timing, panel must allow it */
3310         assert_panel_unlocked(dev_priv, pipe);
3311         ilk_pch_transcoder_set_timings(crtc_state, pipe);
3312
3313         intel_fdi_normal_train(crtc);
3314
3315         /* For PCH DP, enable TRANS_DP_CTL */
3316         if (HAS_PCH_CPT(dev_priv) &&
3317             intel_crtc_has_dp_encoder(crtc_state)) {
3318                 const struct drm_display_mode *adjusted_mode =
3319                         &crtc_state->hw.adjusted_mode;
3320                 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3321                 i915_reg_t reg = TRANS_DP_CTL(pipe);
3322                 enum port port;
3323
3324                 temp = intel_de_read(dev_priv, reg);
3325                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3326                           TRANS_DP_SYNC_MASK |
3327                           TRANS_DP_BPC_MASK);
3328                 temp |= TRANS_DP_OUTPUT_ENABLE;
3329                 temp |= bpc << 9; /* same format but at 11:9 */
3330
3331                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
3332                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3333                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
3334                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3335
3336                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
3337                 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
3338                 temp |= TRANS_DP_PORT_SEL(port);
3339
3340                 intel_de_write(dev_priv, reg, temp);
3341         }
3342
3343         ilk_enable_pch_transcoder(crtc_state);
3344 }
3345
3346 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
3347 {
3348         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3349         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3350         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3351
3352         assert_pch_transcoder_disabled(dev_priv, PIPE_A);
3353
3354         lpt_program_iclkip(crtc_state);
3355
3356         /* Set transcoder timing. */
3357         ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
3358
3359         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3360 }
3361
3362 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
3363                                enum pipe pipe)
3364 {
3365         i915_reg_t dslreg = PIPEDSL(pipe);
3366         u32 temp;
3367
3368         temp = intel_de_read(dev_priv, dslreg);
3369         udelay(500);
3370         if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
3371                 if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
3372                         drm_err(&dev_priv->drm,
3373                                 "mode set failed: pipe %c stuck\n",
3374                                 pipe_name(pipe));
3375         }
3376 }
3377
3378 /*
3379  * The hardware phase 0.0 refers to the center of the pixel.
3380  * We want to start from the top/left edge which is phase
3381  * -0.5. That matches how the hardware calculates the scaling
3382  * factors (from top-left of the first pixel to bottom-right
3383  * of the last pixel, as opposed to the pixel centers).
3384  *
3385  * For 4:2:0 subsampled chroma planes we obviously have to
3386  * adjust that so that the chroma sample position lands in
3387  * the right spot.
3388  *
3389  * Note that for packed YCbCr 4:2:2 formats there is no way to
3390  * control chroma siting. The hardware simply replicates the
3391  * chroma samples for both of the luma samples, and thus we don't
3392  * actually get the expected MPEG2 chroma siting convention :(
3393  * The same behaviour is observed on pre-SKL platforms as well.
3394  *
3395  * Theory behind the formula (note that we ignore sub-pixel
3396  * source coordinates):
3397  * s = source sample position
3398  * d = destination sample position
3399  *
3400  * Downscaling 4:1:
3401  * -0.5
3402  * | 0.0
3403  * | |     1.5 (initial phase)
3404  * | |     |
3405  * v v     v
3406  * | s | s | s | s |
3407  * |       d       |
3408  *
3409  * Upscaling 1:4:
3410  * -0.5
3411  * | -0.375 (initial phase)
3412  * | |     0.0
3413  * | |     |
3414  * v v     v
3415  * |       s       |
3416  * | d | d | d | d |
3417  */
3418 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
3419 {
3420         int phase = -0x8000;
3421         u16 trip = 0;
3422
3423         if (chroma_cosited)
3424                 phase += (sub - 1) * 0x8000 / sub;
3425
3426         phase += scale / (2 * sub);
3427
3428         /*
3429          * Hardware initial phase limited to [-0.5:1.5].
3430          * Since the max hardware scale factor is 3.0, we
3431          * should never actually excdeed 1.0 here.
3432          */
3433         WARN_ON(phase < -0x8000 || phase > 0x18000);
3434
3435         if (phase < 0)
3436                 phase = 0x10000 + phase;
3437         else
3438                 trip = PS_PHASE_TRIP;
3439
3440         return ((phase >> 2) & PS_PHASE_MASK) | trip;
3441 }
3442
3443 #define SKL_MIN_SRC_W 8
3444 #define SKL_MAX_SRC_W 4096
3445 #define SKL_MIN_SRC_H 8
3446 #define SKL_MAX_SRC_H 4096
3447 #define SKL_MIN_DST_W 8
3448 #define SKL_MAX_DST_W 4096
3449 #define SKL_MIN_DST_H 8
3450 #define SKL_MAX_DST_H 4096
3451 #define ICL_MAX_SRC_W 5120
3452 #define ICL_MAX_SRC_H 4096
3453 #define ICL_MAX_DST_W 5120
3454 #define ICL_MAX_DST_H 4096
3455 #define SKL_MIN_YUV_420_SRC_W 16
3456 #define SKL_MIN_YUV_420_SRC_H 16
3457
/*
 * skl_update_scaler - stage a scaler allocation or free request
 *
 * Validates the requested scaling against hardware limits and either
 * marks @scaler_user as a scaler user in @crtc_state, or stages the
 * release of its scaler when scaling is no longer needed (or
 * @force_detach is set). Returns 0 on success, -EINVAL when the
 * request cannot be satisfied. Actual register programming happens
 * later, during plane/panel-fitter programming.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  const struct drm_format_info *format,
                  u64 modifier, bool need_scaler)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
            need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                drm_dbg_kms(&dev_priv->drm,
                            "Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler bound to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        drm_dbg_kms(&dev_priv->drm,
                                    "scaler_user index %u.%u: "
                                    "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                    intel_crtc->pipe, scaler_user, *scaler_id,
                                    scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* Planar YUV sources have a larger minimum size */
        if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Planar YUV: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks: ICL raised the max width over SKL */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (INTEL_GEN(dev_priv) >= 11 &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (INTEL_GEN(dev_priv) < 11 &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                drm_dbg_kms(&dev_priv->drm,
                            "scaler_user index %u.%u: src %ux%u dst %ux%u "
                            "size is out of scaler range\n",
                            intel_crtc->pipe, scaler_user, src_w, src_h,
                            dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
                    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                    scaler_state->scaler_users);

        return 0;
}
3552
3553 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
3554 {
3555         const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
3556         int width, height;
3557
3558         if (crtc_state->pch_pfit.enabled) {
3559                 width = drm_rect_width(&crtc_state->pch_pfit.dst);
3560                 height = drm_rect_height(&crtc_state->pch_pfit.dst);
3561         } else {
3562                 width = pipe_mode->crtc_hdisplay;
3563                 height = pipe_mode->crtc_vdisplay;
3564         }
3565         return skl_update_scaler(crtc_state, !crtc_state->hw.active,
3566                                  SKL_CRTC_INDEX,
3567                                  &crtc_state->scaler_state.scaler_id,
3568                                  crtc_state->pipe_src_w, crtc_state->pipe_src_h,
3569                                  width, height, NULL, 0,
3570                                  crtc_state->pch_pfit.enabled);
3571 }
3572
3573 /**
3574  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
3575  * @crtc_state: crtc's scaler state
3576  * @plane_state: atomic plane state to update
3577  *
3578  * Return
3579  *     0 - scaler_usage updated successfully
3580  *    error - requested scaling cannot be supported or other error condition
3581  */
3582 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
3583                                    struct intel_plane_state *plane_state)
3584 {
3585         struct intel_plane *intel_plane =
3586                 to_intel_plane(plane_state->uapi.plane);
3587         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
3588         struct drm_framebuffer *fb = plane_state->hw.fb;
3589         int ret;
3590         bool force_detach = !fb || !plane_state->uapi.visible;
3591         bool need_scaler = false;
3592
3593         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
3594         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
3595             fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
3596                 need_scaler = true;
3597
3598         ret = skl_update_scaler(crtc_state, force_detach,
3599                                 drm_plane_index(&intel_plane->base),
3600                                 &plane_state->scaler_id,
3601                                 drm_rect_width(&plane_state->uapi.src) >> 16,
3602                                 drm_rect_height(&plane_state->uapi.src) >> 16,
3603                                 drm_rect_width(&plane_state->uapi.dst),
3604                                 drm_rect_height(&plane_state->uapi.dst),
3605                                 fb ? fb->format : NULL,
3606                                 fb ? fb->modifier : 0,
3607                                 need_scaler);
3608
3609         if (ret || plane_state->scaler_id < 0)
3610                 return ret;
3611
3612         /* check colorkey */
3613         if (plane_state->ckey.flags) {
3614                 drm_dbg_kms(&dev_priv->drm,
3615                             "[PLANE:%d:%s] scaling with color key not allowed",
3616                             intel_plane->base.base.id,
3617                             intel_plane->base.name);
3618                 return -EINVAL;
3619         }
3620
3621         /* Check src format */
3622         switch (fb->format->format) {
3623         case DRM_FORMAT_RGB565:
3624         case DRM_FORMAT_XBGR8888:
3625         case DRM_FORMAT_XRGB8888:
3626         case DRM_FORMAT_ABGR8888:
3627         case DRM_FORMAT_ARGB8888:
3628         case DRM_FORMAT_XRGB2101010:
3629         case DRM_FORMAT_XBGR2101010:
3630         case DRM_FORMAT_ARGB2101010:
3631         case DRM_FORMAT_ABGR2101010:
3632         case DRM_FORMAT_YUYV:
3633         case DRM_FORMAT_YVYU:
3634         case DRM_FORMAT_UYVY:
3635         case DRM_FORMAT_VYUY:
3636         case DRM_FORMAT_NV12:
3637         case DRM_FORMAT_XYUV8888:
3638         case DRM_FORMAT_P010:
3639         case DRM_FORMAT_P012:
3640         case DRM_FORMAT_P016:
3641         case DRM_FORMAT_Y210:
3642         case DRM_FORMAT_Y212:
3643         case DRM_FORMAT_Y216:
3644         case DRM_FORMAT_XVYU2101010:
3645         case DRM_FORMAT_XVYU12_16161616:
3646         case DRM_FORMAT_XVYU16161616:
3647                 break;
3648         case DRM_FORMAT_XBGR16161616F:
3649         case DRM_FORMAT_ABGR16161616F:
3650         case DRM_FORMAT_XRGB16161616F:
3651         case DRM_FORMAT_ARGB16161616F:
3652                 if (INTEL_GEN(dev_priv) >= 11)
3653                         break;
3654                 fallthrough;
3655         default:
3656                 drm_dbg_kms(&dev_priv->drm,
3657                             "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
3658                             intel_plane->base.base.id, intel_plane->base.name,
3659                             fb->base.id, fb->format->format);
3660                 return -EINVAL;
3661         }
3662
3663         return 0;
3664 }
3665
3666 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
3667 {
3668         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3669         int i;
3670
3671         for (i = 0; i < crtc->num_scalers; i++)
3672                 skl_detach_scaler(crtc, i);
3673 }
3674
/* Map a linear coefficient index onto its filter tap (0..6). */
static int cnl_coef_tap(int idx)
{
	return idx % 7;
}
3679
3680 static u16 cnl_nearest_filter_coef(int t)
3681 {
3682         return t == 3 ? 0x0800 : 0x3000;
3683 }
3684
3685 /*
3686  *  Theory behind setting nearest-neighbor integer scaling:
3687  *
 *  17 phases of 7 taps require 119 coefficients in 60 dwords per set.
3689  *  The letter represents the filter tap (D is the center tap) and the number
3690  *  represents the coefficient set for a phase (0-16).
3691  *
3692  *         +------------+------------------------+------------------------+
 *         |Index value | Data value coefficient 1 | Data value coefficient 2 |
3694  *         +------------+------------------------+------------------------+
3695  *         |   00h      |          B0            |          A0            |
3696  *         +------------+------------------------+------------------------+
3697  *         |   01h      |          D0            |          C0            |
3698  *         +------------+------------------------+------------------------+
3699  *         |   02h      |          F0            |          E0            |
3700  *         +------------+------------------------+------------------------+
3701  *         |   03h      |          A1            |          G0            |
3702  *         +------------+------------------------+------------------------+
3703  *         |   04h      |          C1            |          B1            |
3704  *         +------------+------------------------+------------------------+
3705  *         |   ...      |          ...           |          ...           |
3706  *         +------------+------------------------+------------------------+
3707  *         |   38h      |          B16           |          A16           |
3708  *         +------------+------------------------+------------------------+
3709  *         |   39h      |          D16           |          C16           |
3710  *         +------------+------------------------+------------------------+
3711  *         |   3Ah      |          F16           |          C16           |
3712  *         +------------+------------------------+------------------------+
3713  *         |   3Bh      |        Reserved        |          G16           |
3714  *         +------------+------------------------+------------------------+
3715  *
 *  To enable nearest-neighbor scaling: program scaler coefficients with
3717  *  the center tap (Dxx) values set to 1 and all other values set to 0 as per
3718  *  SCALER_COEFFICIENT_FORMAT
3719  *
3720  */
3721
3722 static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
3723                                              enum pipe pipe, int id, int set)
3724 {
3725         int i;
3726
3727         intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
3728                           PS_COEE_INDEX_AUTO_INC);
3729
3730         for (i = 0; i < 17 * 7; i += 2) {
3731                 u32 tmp;
3732                 int t;
3733
3734                 t = cnl_coef_tap(i);
3735                 tmp = cnl_nearest_filter_coef(t);
3736
3737                 t = cnl_coef_tap(i + 1);
3738                 tmp |= cnl_nearest_filter_coef(t) << 16;
3739
3740                 intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
3741                                   tmp);
3742         }
3743
3744         intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
3745 }
3746
3747 u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
3748 {
3749         if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
3750                 return (PS_FILTER_PROGRAMMED |
3751                         PS_Y_VERT_FILTER_SELECT(set) |
3752                         PS_Y_HORZ_FILTER_SELECT(set) |
3753                         PS_UV_VERT_FILTER_SELECT(set) |
3754                         PS_UV_HORZ_FILTER_SELECT(set));
3755         }
3756
3757         return PS_FILTER_MEDIUM;
3758 }
3759
3760 void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
3761                              int id, int set, enum drm_scaling_filter filter)
3762 {
3763         switch (filter) {
3764         case DRM_SCALING_FILTER_DEFAULT:
3765                 break;
3766         case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
3767                 cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
3768                 break;
3769         default:
3770                 MISSING_CASE(filter);
3771         }
3772 }
3773
/*
 * Program the SKL+ panel fitter using a pipe scaler: compute the
 * scaling phases from the pipe source size and the pfit destination
 * rectangle, then write the scaler control/phase/window registers.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        /* Pipe source size in 16.16 fixed point for the scale calc */
        struct drm_rect src = {
                .x2 = crtc_state->pipe_src_w << 16,
                .y2 = crtc_state->pipe_src_h << 16,
        };
        const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
        u16 uv_rgb_hphase, uv_rgb_vphase;
        enum pipe pipe = crtc->pipe;
        int width = drm_rect_width(dst);
        int height = drm_rect_height(dst);
        int x = dst->x1;
        int y = dst->y1;
        int hscale, vscale;
        unsigned long irqflags;
        int id;
        u32 ps_ctrl;

        if (!crtc_state->pch_pfit.enabled)
                return;

        /* A scaler must have been assigned during atomic check */
        if (drm_WARN_ON(&dev_priv->drm,
                        crtc_state->scaler_state.scaler_id < 0))
                return;

        hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
        vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

        uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
        uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

        id = scaler_state->scaler_id;

        ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
        ps_ctrl |=  PS_SCALER_EN | scaler_state->scalers[id].mode;

        /* _fw register accesses below require holding the uncore lock */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        skl_scaler_setup_filter(dev_priv, pipe, id, 0,
                                crtc_state->hw.scaling_filter);

        intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);

        intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
                          PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
        intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
                          PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
        intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
                          x << 16 | y);
        intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
                          width << 16 | height);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3832
/*
 * Enable the ILK-style panel fitter for @crtc_state.
 *
 * Programs PF_CTL and the fitter window position/size from the
 * pch_pfit destination rectangle. No-op unless pch_pfit.enabled
 * is set in the state.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
			       PF_FILTER_MED_3x3);
	/* Fitter window: position in the high/low halves, then size. */
	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
3860
/*
 * hsw_enable_ips - enable IPS for @crtc_state
 *
 * No-op unless the state has ips_enabled set. Must run after a plane is
 * enabled and a vblank has passed; it is called from post_plane_update,
 * which satisfies that requirement.
 *
 * On Broadwell the enable goes through the pcode mailbox and the IPS_CTL
 * readback cannot be trusted; on Haswell IPS_CTL is written directly and
 * we wait for the enable bit to stick.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
3897
/*
 * hsw_disable_ips - disable IPS for @crtc_state
 *
 * No-op unless the state has ips_enabled set. On Broadwell the disable
 * goes through the pcode mailbox and we poll IPS_CTL until the hardware
 * reports IPS off; on Haswell IPS_CTL is cleared directly. In both cases
 * a vblank wait follows so planes may safely be disabled afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
3926
3927 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
3928 {
3929         if (intel_crtc->overlay)
3930                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3931
3932         /* Let userspace switch the overlay on again. In most cases userspace
3933          * has to recompute where to put it anyway.
3934          */
3935 }
3936
3937 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
3938                                        const struct intel_crtc_state *new_crtc_state)
3939 {
3940         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3941         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3942
3943         if (!old_crtc_state->ips_enabled)
3944                 return false;
3945
3946         if (intel_crtc_needs_modeset(new_crtc_state))
3947                 return true;
3948
3949         /*
3950          * Workaround : Do not read or write the pipe palette/gamma data while
3951          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3952          *
3953          * Disable IPS before we program the LUT.
3954          */
3955         if (IS_HASWELL(dev_priv) &&
3956             (new_crtc_state->uapi.color_mgmt_changed ||
3957              new_crtc_state->update_pipe) &&
3958             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3959                 return true;
3960
3961         return !new_crtc_state->ips_enabled;
3962 }
3963
3964 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
3965                                        const struct intel_crtc_state *new_crtc_state)
3966 {
3967         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
3968         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3969
3970         if (!new_crtc_state->ips_enabled)
3971                 return false;
3972
3973         if (intel_crtc_needs_modeset(new_crtc_state))
3974                 return true;
3975
3976         /*
3977          * Workaround : Do not read or write the pipe palette/gamma data while
3978          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3979          *
3980          * Re-enable IPS after the LUT has been programmed.
3981          */
3982         if (IS_HASWELL(dev_priv) &&
3983             (new_crtc_state->uapi.color_mgmt_changed ||
3984              new_crtc_state->update_pipe) &&
3985             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
3986                 return true;
3987
3988         /*
3989          * We can't read out IPS on broadwell, assume the worst and
3990          * forcibly enable IPS on the first fastset.
3991          */
3992         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
3993                 return true;
3994
3995         return !old_crtc_state->ips_enabled;
3996 }
3997
3998 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
3999 {
4000         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4001
4002         if (!crtc_state->nv12_planes)
4003                 return false;
4004
4005         /* WA Display #0827: Gen9:all */
4006         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
4007                 return true;
4008
4009         return false;
4010 }
4011
4012 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
4013 {
4014         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4015
4016         /* Wa_2006604312:icl,ehl */
4017         if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
4018                 return true;
4019
4020         return false;
4021 }
4022
4023 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
4024                             const struct intel_crtc_state *new_crtc_state)
4025 {
4026         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
4027                 new_crtc_state->active_planes;
4028 }
4029
4030 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
4031                              const struct intel_crtc_state *new_crtc_state)
4032 {
4033         return old_crtc_state->active_planes &&
4034                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
4035 }
4036
/*
 * Per-CRTC tail of a plane update, run after the planes have been
 * committed: flushes frontbuffer bits, updates legacy watermarks,
 * re-enables IPS where needed, runs the FBC post-update hook, and
 * drops the display workarounds the new state no longer needs.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA 827: disable once no longer needed */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl: disable once no longer needed */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
4065
4066 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
4067                                         struct intel_crtc *crtc)
4068 {
4069         const struct intel_crtc_state *crtc_state =
4070                 intel_atomic_get_new_crtc_state(state, crtc);
4071         u8 update_planes = crtc_state->update_planes;
4072         const struct intel_plane_state *plane_state;
4073         struct intel_plane *plane;
4074         int i;
4075
4076         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4077                 if (plane->enable_flip_done &&
4078                     plane->pipe == crtc->pipe &&
4079                     update_planes & BIT(plane->id))
4080                         plane->enable_flip_done(plane);
4081         }
4082 }
4083
4084 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
4085                                          struct intel_crtc *crtc)
4086 {
4087         const struct intel_crtc_state *crtc_state =
4088                 intel_atomic_get_new_crtc_state(state, crtc);
4089         u8 update_planes = crtc_state->update_planes;
4090         const struct intel_plane_state *plane_state;
4091         struct intel_plane *plane;
4092         int i;
4093
4094         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4095                 if (plane->disable_flip_done &&
4096                     plane->pipe == crtc->pipe &&
4097                     update_planes & BIT(plane->id))
4098                         plane->disable_flip_done(plane);
4099         }
4100 }
4101
/*
 * Workaround for planes whose async flip enable bit is double buffered
 * and only latched at start of vblank: re-submit the old plane state
 * with the async flip bit cleared, then wait a vblank so the cleared
 * bit is actually latched before the real update goes out.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	/* One vblank wait covers all planes we just reprogrammed. */
	if (need_vbl_wait)
		intel_wait_for_vblank(i915, crtc->pipe);
}
4133
/*
 * Per-CRTC preparation run before the new plane state is committed:
 * disables IPS where required, enables the display workarounds the new
 * state needs, programs pre-vblank ("intermediate") watermarks for
 * non-modeset updates, and inserts the vblank waits various platforms
 * require between these steps and the actual plane update. The order
 * of these steps is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank before the planes change. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
4227
/*
 * Disable all planes of @crtc covered by the new state's update_planes
 * mask, after first switching off the legacy overlay. Frontbuffer bits
 * of the planes that were visible are flipped at the end.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_disable_plane(plane, new_crtc_state);

		/* Only planes that were actually visible need a fb flip. */
		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
4255
4256 /*
4257  * intel_connector_primary_encoder - get the primary encoder for a connector
4258  * @connector: connector for which to return the encoder
4259  *
4260  * Returns the primary encoder for a connector. There is a 1:1 mapping from
4261  * all connectors to their encoder, except for DP-MST connectors which have
4262  * both a virtual and a primary encoder. These DP-MST primary encoders can be
4263  * pointed to by as many DP-MST connectors as there are pipes.
4264  */
4265 static struct intel_encoder *
4266 intel_connector_primary_encoder(struct intel_connector *connector)
4267 {
4268         struct intel_encoder *encoder;
4269
4270         if (connector->mst_port)
4271                 return &dp_to_dig_port(connector->mst_port)->base;
4272
4273         encoder = intel_attached_encoder(connector);
4274         drm_WARN_ON(connector->base.dev, !encoder);
4275
4276         return encoder;
4277 }
4278
4279 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
4280 {
4281         struct drm_connector_state *new_conn_state;
4282         struct drm_connector *connector;
4283         int i;
4284
4285         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
4286                                         i) {
4287                 struct intel_connector *intel_connector;
4288                 struct intel_encoder *encoder;
4289                 struct intel_crtc *crtc;
4290
4291                 if (!intel_connector_needs_modeset(state, connector))
4292                         continue;
4293
4294                 intel_connector = to_intel_connector(connector);
4295                 encoder = intel_connector_primary_encoder(intel_connector);
4296                 if (!encoder->update_prepare)
4297                         continue;
4298
4299                 crtc = new_conn_state->crtc ?
4300                         to_intel_crtc(new_conn_state->crtc) : NULL;
4301                 encoder->update_prepare(state, encoder, crtc);
4302         }
4303 }
4304
4305 static void intel_encoders_update_complete(struct intel_atomic_state *state)
4306 {
4307         struct drm_connector_state *new_conn_state;
4308         struct drm_connector *connector;
4309         int i;
4310
4311         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
4312                                         i) {
4313                 struct intel_connector *intel_connector;
4314                 struct intel_encoder *encoder;
4315                 struct intel_crtc *crtc;
4316
4317                 if (!intel_connector_needs_modeset(state, connector))
4318                         continue;
4319
4320                 intel_connector = to_intel_connector(connector);
4321                 encoder = intel_connector_primary_encoder(intel_connector);
4322                 if (!encoder->update_complete)
4323                         continue;
4324
4325                 crtc = new_conn_state->crtc ?
4326                         to_intel_crtc(new_conn_state->crtc) : NULL;
4327                 encoder->update_complete(state, encoder, crtc);
4328         }
4329 }
4330
4331 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
4332                                           struct intel_crtc *crtc)
4333 {
4334         const struct intel_crtc_state *crtc_state =
4335                 intel_atomic_get_new_crtc_state(state, crtc);
4336         const struct drm_connector_state *conn_state;
4337         struct drm_connector *conn;
4338         int i;
4339
4340         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4341                 struct intel_encoder *encoder =
4342                         to_intel_encoder(conn_state->best_encoder);
4343
4344                 if (conn_state->crtc != &crtc->base)
4345                         continue;
4346
4347                 if (encoder->pre_pll_enable)
4348                         encoder->pre_pll_enable(state, encoder,
4349                                                 crtc_state, conn_state);
4350         }
4351 }
4352
4353 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
4354                                       struct intel_crtc *crtc)
4355 {
4356         const struct intel_crtc_state *crtc_state =
4357                 intel_atomic_get_new_crtc_state(state, crtc);
4358         const struct drm_connector_state *conn_state;
4359         struct drm_connector *conn;
4360         int i;
4361
4362         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4363                 struct intel_encoder *encoder =
4364                         to_intel_encoder(conn_state->best_encoder);
4365
4366                 if (conn_state->crtc != &crtc->base)
4367                         continue;
4368
4369                 if (encoder->pre_enable)
4370                         encoder->pre_enable(state, encoder,
4371                                             crtc_state, conn_state);
4372         }
4373 }
4374
4375 static void intel_encoders_enable(struct intel_atomic_state *state,
4376                                   struct intel_crtc *crtc)
4377 {
4378         const struct intel_crtc_state *crtc_state =
4379                 intel_atomic_get_new_crtc_state(state, crtc);
4380         const struct drm_connector_state *conn_state;
4381         struct drm_connector *conn;
4382         int i;
4383
4384         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4385                 struct intel_encoder *encoder =
4386                         to_intel_encoder(conn_state->best_encoder);
4387
4388                 if (conn_state->crtc != &crtc->base)
4389                         continue;
4390
4391                 if (encoder->enable)
4392                         encoder->enable(state, encoder,
4393                                         crtc_state, conn_state);
4394                 intel_opregion_notify_encoder(encoder, true);
4395         }
4396 }
4397
4398 static void intel_encoders_disable(struct intel_atomic_state *state,
4399                                    struct intel_crtc *crtc)
4400 {
4401         const struct intel_crtc_state *old_crtc_state =
4402                 intel_atomic_get_old_crtc_state(state, crtc);
4403         const struct drm_connector_state *old_conn_state;
4404         struct drm_connector *conn;
4405         int i;
4406
4407         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
4408                 struct intel_encoder *encoder =
4409                         to_intel_encoder(old_conn_state->best_encoder);
4410
4411                 if (old_conn_state->crtc != &crtc->base)
4412                         continue;
4413
4414                 intel_opregion_notify_encoder(encoder, false);
4415                 if (encoder->disable)
4416                         encoder->disable(state, encoder,
4417                                          old_crtc_state, old_conn_state);
4418         }
4419 }
4420
4421 static void intel_encoders_post_disable(struct intel_atomic_state *state,
4422                                         struct intel_crtc *crtc)
4423 {
4424         const struct intel_crtc_state *old_crtc_state =
4425                 intel_atomic_get_old_crtc_state(state, crtc);
4426         const struct drm_connector_state *old_conn_state;
4427         struct drm_connector *conn;
4428         int i;
4429
4430         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
4431                 struct intel_encoder *encoder =
4432                         to_intel_encoder(old_conn_state->best_encoder);
4433
4434                 if (old_conn_state->crtc != &crtc->base)
4435                         continue;
4436
4437                 if (encoder->post_disable)
4438                         encoder->post_disable(state, encoder,
4439                                               old_crtc_state, old_conn_state);
4440         }
4441 }
4442
4443 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
4444                                             struct intel_crtc *crtc)
4445 {
4446         const struct intel_crtc_state *old_crtc_state =
4447                 intel_atomic_get_old_crtc_state(state, crtc);
4448         const struct drm_connector_state *old_conn_state;
4449         struct drm_connector *conn;
4450         int i;
4451
4452         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
4453                 struct intel_encoder *encoder =
4454                         to_intel_encoder(old_conn_state->best_encoder);
4455
4456                 if (old_conn_state->crtc != &crtc->base)
4457                         continue;
4458
4459                 if (encoder->post_pll_disable)
4460                         encoder->post_pll_disable(state, encoder,
4461                                                   old_crtc_state, old_conn_state);
4462         }
4463 }
4464
4465 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
4466                                        struct intel_crtc *crtc)
4467 {
4468         const struct intel_crtc_state *crtc_state =
4469                 intel_atomic_get_new_crtc_state(state, crtc);
4470         const struct drm_connector_state *conn_state;
4471         struct drm_connector *conn;
4472         int i;
4473
4474         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
4475                 struct intel_encoder *encoder =
4476                         to_intel_encoder(conn_state->best_encoder);
4477
4478                 if (conn_state->crtc != &crtc->base)
4479                         continue;
4480
4481                 if (encoder->update_pipe)
4482                         encoder->update_pipe(state, encoder,
4483                                              crtc_state, conn_state);
4484         }
4485 }
4486
4487 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
4488 {
4489         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4490         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4491
4492         plane->disable_plane(plane, crtc_state);
4493 }
4494
/*
 * ILK-style (PCH era) CRTC enable sequence. The ordering here is
 * strict: timings and M/N are programmed first, then encoder
 * pre-enable + FDI PLL, panel fitter, LUTs, pipe enable, PCH enable,
 * and finally encoder enable. Underrun reporting is suppressed for
 * the whole sequence to hide known-spurious underruns.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active pipe indicates broken state tracking. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	/* Program pipe timings and source size before anything is enabled. */
	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		/* Without a PCH encoder the FDI link must already be off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm underrun reporting now that the enable sequence is done. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4586
4587 /* IPS only exists on ULT machines and is tied to pipe A. */
4588 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4589 {
4590         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
4591 }
4592
4593 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
4594                                             enum pipe pipe, bool apply)
4595 {
4596         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
4597         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
4598
4599         if (apply)
4600                 val |= mask;
4601         else
4602                 val &= ~mask;
4603
4604         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
4605 }
4606
4607 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
4608 {
4609         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4610         enum pipe pipe = crtc->pipe;
4611         u32 val;
4612
4613         val = MBUS_DBOX_A_CREDIT(2);
4614
4615         if (INTEL_GEN(dev_priv) >= 12) {
4616                 val |= MBUS_DBOX_BW_CREDIT(2);
4617                 val |= MBUS_DBOX_B_CREDIT(12);
4618         } else {
4619                 val |= MBUS_DBOX_BW_CREDIT(1);
4620                 val |= MBUS_DBOX_B_CREDIT(8);
4621         }
4622
4623         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
4624 }
4625
4626 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
4627 {
4628         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4629         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4630
4631         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
4632                        HSW_LINETIME(crtc_state->linetime) |
4633                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
4634 }
4635
4636 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
4637 {
4638         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4639         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4640         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
4641         u32 val;
4642
4643         val = intel_de_read(dev_priv, reg);
4644         val &= ~HSW_FRAME_START_DELAY_MASK;
4645         val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4646         intel_de_write(dev_priv, reg, val);
4647 }
4648
/*
 * Bigjoiner pre-enable: run the master pipe's PLL/encoder pre-enable
 * steps and turn on VDSC. Called for both master and slave crtc states;
 * @crtc_state determines which half of the work is done.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* For a slave, operate on the linked master crtc instead. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Find the encoder feeding the master crtc in this commit. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		/*
		 * NOTE(review): encoder stays NULL if no connector in the
		 * state targets the master crtc — presumably guaranteed by
		 * the atomic check; confirm intel_dsc_enable() tolerance.
		 */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}
}
4687
/*
 * HSW+ (DDI) CRTC enable sequence. Strictly ordered: PLL and encoder
 * pre-enable hooks first, then transcoder/pipe programming, panel
 * fitter, LUTs, watermarks, and finally the encoder enable hooks. The
 * GLK/CNL scaler clock gating workaround brackets the sequence.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already active pipe indicates broken state tracking. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		/* Bigjoiner routes PLL/encoder pre-enable via the master. */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Bigjoiner slaves and DSI transcoders skip CPU transcoder setup. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/* Slave pipes turn on vblanks here; masters do it in pch/ddi code. */
	if (new_crtc_state->bigjoiner_slave) {
		trace_intel_pipe_enable(crtc);
		intel_crtc_vblank_on(new_crtc_state);
	}

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
4787
4788 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
4789 {
4790         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4791         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4792         enum pipe pipe = crtc->pipe;
4793
4794         /* To avoid upsetting the power well on haswell only disable the pfit if
4795          * it's in use. The hw state code will make sure we get this right. */
4796         if (!old_crtc_state->pch_pfit.enabled)
4797                 return;
4798
4799         intel_de_write(dev_priv, PF_CTL(pipe), 0);
4800         intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
4801         intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
4802 }
4803
/*
 * ILK-style (PCH era) CRTC disable sequence: encoders, vblank, pipe,
 * pfit, FDI, then the PCH transcoder and FDI PLL. Underrun reporting
 * is suppressed for the duration to hide known-spurious underruns.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-arm underrun reporting now that everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4860
/*
 * HSW+ CRTC disable: only the encoder disable/post-disable hooks run
 * here; the actual transcoder/pipe teardown happens inside the DDI
 * encoder hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
4871
/*
 * Enable the GMCH panel fitter for @crtc_state. Must run while the
 * pipe is still disabled; programs the scaling ratios before the
 * control register per the register programming requirements.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do when the fitter isn't used by this state. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
4896
4897 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
4898 {
4899         if (phy == PHY_NONE)
4900                 return false;
4901         else if (IS_ALDERLAKE_S(dev_priv))
4902                 return phy <= PHY_E;
4903         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
4904                 return phy <= PHY_D;
4905         else if (IS_JSL_EHL(dev_priv))
4906                 return phy <= PHY_C;
4907         else if (INTEL_GEN(dev_priv) >= 11)
4908                 return phy <= PHY_B;
4909         else
4910                 return false;
4911 }
4912
4913 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
4914 {
4915         if (IS_TIGERLAKE(dev_priv))
4916                 return phy >= PHY_D && phy <= PHY_I;
4917         else if (IS_ICELAKE(dev_priv))
4918                 return phy >= PHY_C && phy <= PHY_F;
4919         else
4920                 return false;
4921 }
4922
4923 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
4924 {
4925         if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
4926                 return PHY_B + port - PORT_TC1;
4927         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
4928                 return PHY_C + port - PORT_TC1;
4929         else if (IS_JSL_EHL(i915) && port == PORT_D)
4930                 return PHY_A;
4931
4932         return PHY_A + port - PORT_A;
4933 }
4934
4935 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
4936 {
4937         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
4938                 return TC_PORT_NONE;
4939
4940         if (INTEL_GEN(dev_priv) >= 12)
4941                 return TC_PORT_1 + port - PORT_TC1;
4942         else
4943                 return TC_PORT_1 + port - PORT_C;
4944 }
4945
/*
 * Map a DDI port to the power domain that powers its lanes. Unknown
 * ports fall back to POWER_DOMAIN_PORT_OTHER with a warning.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
4972
/*
 * Return the AUX power domain for @dig_port. Type-C ports in TBT-alt
 * mode use the dedicated *_TBT domains; everything else uses the
 * legacy aux_ch -> domain mapping.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			/* Arbitrary fallback after warning on unknown channel. */
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
5004
/*
 * Converts aux_ch to power_domain without caring about TBT ports;
 * for a TBT-aware mapping use intel_aux_power_domain() instead.
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		/* Arbitrary fallback after warning on unknown channel. */
		return POWER_DOMAIN_AUX_A;
	}
}
5036
5037 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
5038 {
5039         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5040         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5041         struct drm_encoder *encoder;
5042         enum pipe pipe = crtc->pipe;
5043         u64 mask;
5044         enum transcoder transcoder = crtc_state->cpu_transcoder;
5045
5046         if (!crtc_state->hw.active)
5047                 return 0;
5048
5049         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
5050         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
5051         if (crtc_state->pch_pfit.enabled ||
5052             crtc_state->pch_pfit.force_thru)
5053                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5054
5055         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
5056                                   crtc_state->uapi.encoder_mask) {
5057                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5058
5059                 mask |= BIT_ULL(intel_encoder->power_domain);
5060         }
5061
5062         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
5063                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
5064
5065         if (crtc_state->shared_dpll)
5066                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
5067
5068         if (crtc_state->dsc.compression_enable)
5069                 mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
5070
5071         return mask;
5072 }
5073
5074 static u64
5075 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
5076 {
5077         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5078         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5079         enum intel_display_power_domain domain;
5080         u64 domains, new_domains, old_domains;
5081
5082         domains = get_crtc_power_domains(crtc_state);
5083
5084         new_domains = domains & ~crtc->enabled_power_domains.mask;
5085         old_domains = crtc->enabled_power_domains.mask & ~domains;
5086
5087         for_each_power_domain(domain, new_domains)
5088                 intel_display_power_get_in_set(dev_priv,
5089                                                &crtc->enabled_power_domains,
5090                                                domain);
5091
5092         return old_domains;
5093 }
5094
5095 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
5096                                            u64 domains)
5097 {
5098         intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
5099                                             &crtc->enabled_power_domains,
5100                                             domains);
5101 }
5102
/*
 * VLV/CHV CRTC enable sequence: timings and pipeconf first, then PLL
 * prepare/enable, encoder pre-enable, pfit, LUTs, watermarks, pipe
 * enable and finally encoder enable. Ordering is strict.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active pipe indicates broken state tracking. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: disable the blender/background canvas features. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
5157
5158 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
5159 {
5160         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5161         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5162
5163         intel_de_write(dev_priv, FP0(crtc->pipe),
5164                        crtc_state->dpll_hw_state.fp0);
5165         intel_de_write(dev_priv, FP1(crtc->pipe),
5166                        crtc_state->dpll_hw_state.fp1);
5167 }
5168
/*
 * Gen2-gen4 (non-VLV/CHV) CRTC enable sequence: PLL dividers, timings
 * and pipeconf first, then PLL, pfit, LUTs, watermarks, pipe enable,
 * and finally encoder enable. Ordering is strict.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active pipe indicates broken state tracking. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}
5220
5221 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
5222 {
5223         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
5224         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5225
5226         if (!old_crtc_state->gmch_pfit.control)
5227                 return;
5228
5229         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
5230
5231         drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
5232                     intel_de_read(dev_priv, PFIT_CONTROL));
5233         intel_de_write(dev_priv, PFIT_CONTROL, 0);
5234 }
5235
/*
 * Gen2-gen4 + VLV/CHV CRTC disable sequence: encoders, vblank, pipe,
 * pfit, then the DPLL (skipped for DSI, which keeps the PLL running).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
5282
/*
 * Force a CRTC off outside of the normal atomic commit path.
 *
 * Used when sanitizing hardware state: disables all visible planes, builds
 * a minimal one-shot atomic state so the platform ->crtc_disable() hook can
 * run, then scrubs the software bookkeeping (crtc state, encoder links,
 * cdclk/dbuf/bandwidth per-pipe tracking and power domain references) so it
 * agrees with the now-disabled hardware.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	/* Nothing to do if the pipe is already off. */
	if (!crtc_state->hw.active)
		return;

	/* Turn off every plane that is currently visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * The platform ->crtc_disable() hook needs an atomic state to work
	 * with, so fabricate a throwaway one just for this call.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Clear the uapi and hw halves of the crtc state to "disabled". */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach every encoder that was routed to this crtc. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Drop the power domain references taken when the pipe was enabled. */
	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Remove this pipe from the global per-pipe bookkeeping. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
5367
5368 /*
5369  * turn all crtc's off, but do not adjust state
5370  * This has to be paired with a call to intel_modeset_setup_hw_state.
5371  */
5372 int intel_display_suspend(struct drm_device *dev)
5373 {
5374         struct drm_i915_private *dev_priv = to_i915(dev);
5375         struct drm_atomic_state *state;
5376         int ret;
5377
5378         state = drm_atomic_helper_suspend(dev);
5379         ret = PTR_ERR_OR_ZERO(state);
5380         if (ret)
5381                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
5382                         ret);
5383         else
5384                 dev_priv->modeset_restore_state = state;
5385         return ret;
5386 }
5387
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Release drm core bookkeeping first, then free our wrapper. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
5395
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Hardware says the connector is on: it must have an active
		 * crtc and a matching encoder in our tracking. */
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors don't have a 1:1 encoder mapping to check. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware says off: tracking must not claim an active crtc
		 * or a best_encoder without a crtc. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
5434
/*
 * Check whether this crtc state is even eligible for IPS (Intermediate
 * Pixel Storage): right platform/pipe, feature enabled via module param,
 * bpp and (on BDW) pixel rate within limits. Whether IPS actually gets
 * enabled is decided separately in hsw_compute_ips_config().
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* Respect the i915.enable_ips module parameter. */
	if (!dev_priv->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
5463
/*
 * Decide whether IPS should actually be enabled for this crtc state.
 * Starts from ips_enabled = false and only flips it on when every
 * constraint is satisfied. Returns 0 on success or a negative error
 * from the cdclk state lookup.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
5505
5506 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
5507 {
5508         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5509
5510         /* GDG double wide on either pipe, otherwise pipe A only */
5511         return INTEL_GEN(dev_priv) < 4 &&
5512                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
5513 }
5514
5515 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
5516 {
5517         u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
5518         unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
5519
5520         /*
5521          * We only use IF-ID interlacing. If we ever use
5522          * PF-ID we'll need to adjust the pixel_rate here.
5523          */
5524
5525         if (!crtc_state->pch_pfit.enabled)
5526                 return pixel_rate;
5527
5528         pipe_w = crtc_state->pipe_src_w;
5529         pipe_h = crtc_state->pipe_src_h;
5530
5531         pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
5532         pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
5533
5534         if (pipe_w < pfit_w)
5535                 pipe_w = pfit_w;
5536         if (pipe_h < pfit_h)
5537                 pipe_h = pfit_h;
5538
5539         if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
5540                         !pfit_w || !pfit_h))
5541                 return pixel_rate;
5542
5543         return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
5544                        pfit_w * pfit_h);
5545 }
5546
5547 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
5548                                          const struct drm_display_mode *timings)
5549 {
5550         mode->hdisplay = timings->crtc_hdisplay;
5551         mode->htotal = timings->crtc_htotal;
5552         mode->hsync_start = timings->crtc_hsync_start;
5553         mode->hsync_end = timings->crtc_hsync_end;
5554
5555         mode->vdisplay = timings->crtc_vdisplay;
5556         mode->vtotal = timings->crtc_vtotal;
5557         mode->vsync_start = timings->crtc_vsync_start;
5558         mode->vsync_end = timings->crtc_vsync_end;
5559
5560         mode->flags = timings->flags;
5561         mode->type = DRM_MODE_TYPE_DRIVER;
5562
5563         mode->clock = timings->crtc_clock;
5564
5565         drm_mode_set_name(mode);
5566 }
5567
5568 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
5569 {
5570         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
5571
5572         if (HAS_GMCH(dev_priv))
5573                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
5574                 crtc_state->pixel_rate =
5575                         crtc_state->hw.pipe_mode.crtc_clock;
5576         else
5577                 crtc_state->pixel_rate =
5578                         ilk_pipe_pixel_rate(crtc_state);
5579 }
5580
/*
 * Derive hw.pipe_mode, hw.mode and the pixel rate from the adjusted mode
 * read out of the hardware. Called after an encoder ->get_config().
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	/* Sync the user-visible timing fields with the crtc_* timings. */
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
	intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);

	intel_crtc_compute_pixel_rate(crtc_state);

	/*
	 * hw.mode mirrors the adjusted mode but advertises the full source
	 * size; with bigjoiner the width is doubled back up (bigjoiner is
	 * used as a 0/1 shift here).
	 */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
5612
/*
 * Read the encoder's hardware state into @crtc_state, then recompute the
 * derived mode/pixel-rate fields that depend on what was just read out.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
5620
/*
 * Validate and finalize the crtc-level parts of @pipe_config during an
 * atomic check: derive pipe_mode from the adjusted mode (halved for
 * bigjoiner), enforce dotclock and pipe-width constraints, and compute
 * the pixel rate. Returns 0 or -EINVAL if the mode cannot be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 single-wide pipes top out at 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders also need the FDI link parameters computed. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
5701
5702 static void
5703 intel_reduce_m_n_ratio(u32 *num, u32 *den)
5704 {
5705         while (*num > DATA_LINK_M_N_MASK ||
5706                *den > DATA_LINK_M_N_MASK) {
5707                 *num >>= 1;
5708                 *den >>= 1;
5709         }
5710 }
5711
/*
 * Compute an M/N register pair approximating the ratio m/n, scaled and
 * reduced to fit the hardware fields. With @constant_n the fixed N value
 * 0x8000 is used instead of a power of two derived from @n.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = DP_LINK_CONSTANT_N_VALUE;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* Scale M by the same factor applied to N, using 64-bit math. */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
5731
/*
 * Fill in @m_n with the GMCH (data) and link M/N ratios for a DP link:
 * data M/N is stream bandwidth vs. total link bandwidth, link M/N is
 * pixel clock vs. link clock. FEC overhead is folded into the data
 * clock when @fec_enable is set.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	/* link_clock * nlanes * 8: total link bandwidth in bits */
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
5753
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed, preferring the BIOS value to avoid flicker on mode changes.
 * Only IBX/CPT PCH platforms expose this via PCH_DREF_CONTROL.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
5776
/*
 * Work around PLL B opamp miscalibration on VLV by forcing it on and
 * programming a sane value via the DPIO sideband registers. The magic
 * values come from the hardware programming notes; caller must hold
 * the dpio lock (done via vlv_dpio_get() by the caller).
 * NOTE(review): register value semantics are opaque here — presumably
 * per the VLV DPIO programming guide; verify against that doc.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
5805
/* Program the PCH transcoder data/link M1/N1 registers from @m_n. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the data M register with gmch_m. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
5819
5820 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
5821                                  enum transcoder transcoder)
5822 {
5823         if (IS_HASWELL(dev_priv))
5824                 return transcoder == TRANSCODER_EDP;
5825
5826         /*
5827          * Strictly speaking some registers are available before
5828          * gen7, but we only support DRRS on gen7+
5829          */
5830         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
5831 }
5832
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are indexed by transcoder; older (G4X-style) hardware
 * indexes them by pipe. @m2_n2 is optional and only written when DRRS
 * is in use and the transcoder actually has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* Pre-ILK: per-pipe G4X register layout. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
5874
/*
 * Program the DP link M/N values selected by @m_n (M1_N1 or M2_N2) into
 * either the PCH transcoder or the CPU transcoder registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n from the crtc
	 * state, ignoring the M2_N2 selection above — presumably because
	 * DRRS isn't used with PCH encoders; confirm before relying on it.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
5900
/*
 * Program the VLV DPLL dividers and analog tuning values through the
 * DPIO sideband before the PLL is enabled. The magic constants follow
 * the "eDP HDMI DPIO driver vbios notes" referenced below; with DSI the
 * DPLL itself is unused and only the refclk enable is written.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write dividers first, then re-write with calibration enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock setup; the extra bit is set only for DP encoders. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
5999
6000 static void chv_prepare_pll(struct intel_crtc *crtc,
6001                             const struct intel_crtc_state *pipe_config)
6002 {
6003         struct drm_device *dev = crtc->base.dev;
6004         struct drm_i915_private *dev_priv = to_i915(dev);
6005         enum pipe pipe = crtc->pipe;
6006         enum dpio_channel port = vlv_pipe_to_channel(pipe);
6007         u32 loopfilter, tribuf_calcntr;
6008         u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
6009         u32 dpio_val;
6010         int vco;
6011
6012         /* Enable Refclk and SSC */
6013         intel_de_write(dev_priv, DPLL(pipe),
6014                        pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
6015
6016         /* No need to actually set up the DPLL with DSI */
6017         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6018                 return;
6019
6020         bestn = pipe_config->dpll.n;
6021         bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
6022         bestm1 = pipe_config->dpll.m1;
6023         bestm2 = pipe_config->dpll.m2 >> 22;
6024         bestp1 = pipe_config->dpll.p1;
6025         bestp2 = pipe_config->dpll.p2;
6026         vco = pipe_config->dpll.vco;
6027         dpio_val = 0;
6028         loopfilter = 0;
6029
6030         vlv_dpio_get(dev_priv);
6031
6032         /* p1 and p2 divider */
6033         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
6034                         5 << DPIO_CHV_S1_DIV_SHIFT |
6035                         bestp1 << DPIO_CHV_P1_DIV_SHIFT |
6036                         bestp2 << DPIO_CHV_P2_DIV_SHIFT |
6037                         1 << DPIO_CHV_K_DIV_SHIFT);
6038
6039         /* Feedback post-divider - m2 */
6040         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
6041
6042         /* Feedback refclk divider - n and m1 */
6043         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
6044                         DPIO_CHV_M1_DIV_BY_2 |
6045                         1 << DPIO_CHV_N_DIV_SHIFT);
6046
6047         /* M2 fraction division */
6048         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
6049
6050         /* M2 fraction division enable */
6051         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
6052         dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
6053         dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
6054         if (bestm2_frac)
6055                 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
6056         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
6057
6058         /* Program digital lock detect threshold */
6059         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
6060         dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
6061                                         DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
6062         dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
6063         if (!bestm2_frac)
6064                 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
6065         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
6066
6067         /* Loop filter */
6068         if (vco == 5400000) {
6069                 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
6070                 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
6071                 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
6072                 tribuf_calcntr = 0x9;
6073         } else if (vco <= 6200000) {
6074                 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
6075                 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
6076                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6077                 tribuf_calcntr = 0x9;
6078         } else if (vco <= 6480000) {
6079                 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
6080                 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
6081                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6082                 tribuf_calcntr = 0x8;
6083         } else {
6084                 /* Not supported. Apply the same limits as in the max case */
6085                 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
6086                 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
6087                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
6088                 tribuf_calcntr = 0;
6089         }
6090         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
6091
6092         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
6093         dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
6094         dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
6095         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
6096
6097         /* AFC Recal */
6098         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
6099                         vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
6100                         DPIO_AFC_RECAL);
6101
6102         vlv_dpio_put(dev_priv);
6103 }
6104
6105 /**
6106  * vlv_force_pll_on - forcibly enable just the PLL
6107  * @dev_priv: i915 private structure
6108  * @pipe: pipe PLL to enable
6109  * @dpll: PLL configuration
6110  *
6111  * Enable the PLL for @pipe using the supplied @dpll config. To be used
6112  * in cases where we need the PLL enabled even when @pipe is not going to
6113  * be enabled.
6114  */
6115 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
6116                      const struct dpll *dpll)
6117 {
6118         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6119         struct intel_crtc_state *pipe_config;
6120
6121         pipe_config = intel_crtc_state_alloc(crtc);
6122         if (!pipe_config)
6123                 return -ENOMEM;
6124
6125         pipe_config->cpu_transcoder = (enum transcoder)pipe;
6126         pipe_config->pixel_multiplier = 1;
6127         pipe_config->dpll = *dpll;
6128
6129         if (IS_CHERRYVIEW(dev_priv)) {
6130                 chv_compute_dpll(crtc, pipe_config);
6131                 chv_prepare_pll(crtc, pipe_config);
6132                 chv_enable_pll(crtc, pipe_config);
6133         } else {
6134                 vlv_compute_dpll(crtc, pipe_config);
6135                 vlv_prepare_pll(crtc, pipe_config);
6136                 vlv_enable_pll(crtc, pipe_config);
6137         }
6138
6139         kfree(pipe_config);
6140
6141         return 0;
6142 }
6143
6144 /**
6145  * vlv_force_pll_off - forcibly disable just the PLL
6146  * @dev_priv: i915 private structure
6147  * @pipe: pipe PLL to disable
6148  *
6149  * Disable the PLL for @pipe. To be used in cases where we need
6150  * the PLL enabled even when @pipe is not going to be enabled.
6151  */
6152 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
6153 {
6154         if (IS_CHERRYVIEW(dev_priv))
6155                 chv_disable_pll(dev_priv, pipe);
6156         else
6157                 vlv_disable_pll(dev_priv, pipe);
6158 }
6159
6160
6161
/*
 * Program the transcoder H/V timing registers (HTOTAL/HBLANK/HSYNC,
 * VTOTAL/VBLANK/VSYNC and VSYNCSHIFT) from the adjusted mode in
 * @crtc_state. Each register packs (start - 1) | (end - 1) << 16.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* wrap a negative shift back into the scanline */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT is only written on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
6219
6220 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
6221 {
6222         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6223         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6224         enum pipe pipe = crtc->pipe;
6225
6226         /* pipesrc controls the size that is scaled from, which should
6227          * always be the user's requested size.
6228          */
6229         intel_de_write(dev_priv, PIPESRC(pipe),
6230                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
6231 }
6232
6233 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
6234 {
6235         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6236         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
6237
6238         if (IS_GEN(dev_priv, 2))
6239                 return false;
6240
6241         if (INTEL_GEN(dev_priv) >= 9 ||
6242             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6243                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
6244         else
6245                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
6246 }
6247
6248 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
6249                                          struct intel_crtc_state *pipe_config)
6250 {
6251         struct drm_device *dev = crtc->base.dev;
6252         struct drm_i915_private *dev_priv = to_i915(dev);
6253         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6254         u32 tmp;
6255
6256         tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
6257         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6258         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6259
6260         if (!transcoder_is_dsi(cpu_transcoder)) {
6261                 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
6262                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
6263                                                         (tmp & 0xffff) + 1;
6264                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
6265                                                 ((tmp >> 16) & 0xffff) + 1;
6266         }
6267         tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
6268         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6269         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6270
6271         tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
6272         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6273         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6274
6275         if (!transcoder_is_dsi(cpu_transcoder)) {
6276                 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
6277                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
6278                                                         (tmp & 0xffff) + 1;
6279                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
6280                                                 ((tmp >> 16) & 0xffff) + 1;
6281         }
6282         tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
6283         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6284         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6285
6286         if (intel_pipe_is_interlaced(pipe_config)) {
6287                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6288                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
6289                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
6290         }
6291 }
6292
6293 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
6294                                     struct intel_crtc_state *pipe_config)
6295 {
6296         struct drm_device *dev = crtc->base.dev;
6297         struct drm_i915_private *dev_priv = to_i915(dev);
6298         u32 tmp;
6299
6300         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
6301         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6302         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6303 }
6304
/*
 * Program PIPECONF for gmch platforms (pre-ILK, VLV, CHV): pipe-enable
 * inheritance on 830, double-wide mode, bpc/dither, interlace mode,
 * color range, gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3 and earlier, and SDVO outputs, get the field
		 * indication interlace variant */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* limited color range selection only exists on VLV/CHV PIPECONF */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
6365
6366 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
6367 {
6368         if (IS_I830(dev_priv))
6369                 return false;
6370
6371         return INTEL_GEN(dev_priv) >= 4 ||
6372                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
6373 }
6374
6375 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
6376 {
6377         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6378         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6379         u32 tmp;
6380
6381         if (!i9xx_has_pfit(dev_priv))
6382                 return;
6383
6384         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
6385         if (!(tmp & PFIT_ENABLE))
6386                 return;
6387
6388         /* Check whether the pfit is attached to our pipe. */
6389         if (INTEL_GEN(dev_priv) < 4) {
6390                 if (crtc->pipe != PIPE_B)
6391                         return;
6392         } else {
6393                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6394                         return;
6395         }
6396
6397         crtc_state->gmch_pfit.control = tmp;
6398         crtc_state->gmch_pfit.pgm_ratios =
6399                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
6400 }
6401
6402 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6403                                struct intel_crtc_state *pipe_config)
6404 {
6405         struct drm_device *dev = crtc->base.dev;
6406         struct drm_i915_private *dev_priv = to_i915(dev);
6407         enum pipe pipe = crtc->pipe;
6408         struct dpll clock;
6409         u32 mdiv;
6410         int refclk = 100000;
6411
6412         /* In case of DSI, DPLL will not be used */
6413         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6414                 return;
6415
6416         vlv_dpio_get(dev_priv);
6417         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6418         vlv_dpio_put(dev_priv);
6419
6420         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6421         clock.m2 = mdiv & DPIO_M2DIV_MASK;
6422         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6423         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6424         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6425
6426         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
6427 }
6428
/*
 * Read out the primary plane configuration left behind by the
 * BIOS/firmware (format, tiling, rotation, surface base, pitch and
 * size), so the boot framebuffer can be taken over. On success the
 * allocated intel_framebuffer is handed to the caller via
 * plane_config->fb.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	/* Tiling and 180° rotation bits only exist on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/*
	 * Surface base/offset registers vary by platform generation.
	 * NOTE(review): offset is read but not used below — presumably
	 * only base matters for takeover; confirm against callers.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = intel_de_read(dev_priv,
					       DSPTILEOFF(i9xx_plane));
		else
			offset = intel_de_read(dev_priv,
					       DSPLINOFF(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* Derive fb dimensions from the pipe source size. */
	val = intel_de_read(dev_priv, PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
}
6514
/*
 * Compute the port clock from the CHV DPLL divider registers read
 * over DPIO.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	/*
	 * m1 is either /2 or bypass; m2 has a 22-bit fractional part
	 * (DW2) below the integer part (DW0), only added when the
	 * fractional divider is enabled.
	 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
6548
6549 static enum intel_output_format
6550 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
6551 {
6552         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6553         u32 tmp;
6554
6555         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
6556
6557         if (tmp & PIPEMISC_YUV420_ENABLE) {
6558                 /* We support 4:2:0 in full blend mode only */
6559                 drm_WARN_ON(&dev_priv->drm,
6560                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
6561
6562                 return INTEL_OUTPUT_FORMAT_YCBCR420;
6563         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
6564                 return INTEL_OUTPUT_FORMAT_YCBCR444;
6565         } else {
6566                 return INTEL_OUTPUT_FORMAT_RGB;
6567         }
6568 }
6569
6570 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
6571 {
6572         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6573         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6574         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6575         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
6576         u32 tmp;
6577
6578         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
6579
6580         if (tmp & DISPPLANE_GAMMA_ENABLE)
6581                 crtc_state->gamma_enable = true;
6582
6583         if (!HAS_GMCH(dev_priv) &&
6584             tmp & DISPPLANE_PIPE_CSC_ENABLE)
6585                 crtc_state->csc_enable = true;
6586 }
6587
/*
 * Read out the full hardware state of a gmch/VLV/CHV pipe into
 * @pipe_config. Returns true if the pipe is enabled and the state was
 * read out, false if its power domain is off or the pipe is disabled.
 * The display power wakeref is held only for the duration of the
 * readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Bail out if the pipe's power domain cannot be acquired. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* bpc is only read out on g4x/VLV/CHV */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* The pixel multiplier lives in different places per platform. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Port clock readout is platform specific. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
6709
6710 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
6711 {
6712         struct intel_encoder *encoder;
6713         int i;
6714         u32 val, final;
6715         bool has_lvds = false;
6716         bool has_cpu_edp = false;
6717         bool has_panel = false;
6718         bool has_ck505 = false;
6719         bool can_ssc = false;
6720         bool using_ssc_source = false;
6721
6722         /* We need to take the global config into account */
6723         for_each_intel_encoder(&dev_priv->drm, encoder) {
6724                 switch (encoder->type) {
6725                 case INTEL_OUTPUT_LVDS:
6726                         has_panel = true;
6727                         has_lvds = true;
6728                         break;
6729                 case INTEL_OUTPUT_EDP:
6730                         has_panel = true;
6731                         if (encoder->port == PORT_A)
6732                                 has_cpu_edp = true;
6733                         break;
6734                 default:
6735                         break;
6736                 }
6737         }
6738
6739         if (HAS_PCH_IBX(dev_priv)) {
6740                 has_ck505 = dev_priv->vbt.display_clock_mode;
6741                 can_ssc = has_ck505;
6742         } else {
6743                 has_ck505 = false;
6744                 can_ssc = true;
6745         }
6746
6747         /* Check if any DPLLs are using the SSC source */
6748         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
6749                 u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
6750
6751                 if (!(temp & DPLL_VCO_ENABLE))
6752                         continue;
6753
6754                 if ((temp & PLL_REF_INPUT_MASK) ==
6755                     PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6756                         using_ssc_source = true;
6757                         break;
6758                 }
6759         }
6760
6761         drm_dbg_kms(&dev_priv->drm,
6762                     "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
6763                     has_panel, has_lvds, has_ck505, using_ssc_source);
6764
6765         /* Ironlake: try to setup display ref clock before DPLL
6766          * enabling. This is only under driver's control after
6767          * PCH B stepping, previous chipset stepping should be
6768          * ignoring this setting.
6769          */
6770         val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
6771
6772         /* As we must carefully and slowly disable/enable each source in turn,
6773          * compute the final state we want first and check if we need to
6774          * make any changes at all.
6775          */
6776         final = val;
6777         final &= ~DREF_NONSPREAD_SOURCE_MASK;
6778         if (has_ck505)
6779                 final |= DREF_NONSPREAD_CK505_ENABLE;
6780         else
6781                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
6782
6783         final &= ~DREF_SSC_SOURCE_MASK;
6784         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6785         final &= ~DREF_SSC1_ENABLE;
6786
6787         if (has_panel) {
6788                 final |= DREF_SSC_SOURCE_ENABLE;
6789
6790                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
6791                         final |= DREF_SSC1_ENABLE;
6792
6793                 if (has_cpu_edp) {
6794                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
6795                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6796                         else
6797                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6798                 } else
6799                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6800         } else if (using_ssc_source) {
6801                 final |= DREF_SSC_SOURCE_ENABLE;
6802                 final |= DREF_SSC1_ENABLE;
6803         }
6804
6805         if (final == val)
6806                 return;
6807
6808         /* Always enable nonspread source */
6809         val &= ~DREF_NONSPREAD_SOURCE_MASK;
6810
6811         if (has_ck505)
6812                 val |= DREF_NONSPREAD_CK505_ENABLE;
6813         else
6814                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
6815
6816         if (has_panel) {
6817                 val &= ~DREF_SSC_SOURCE_MASK;
6818                 val |= DREF_SSC_SOURCE_ENABLE;
6819
6820                 /* SSC must be turned on before enabling the CPU output  */
6821                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6822                         drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
6823                         val |= DREF_SSC1_ENABLE;
6824                 } else
6825                         val &= ~DREF_SSC1_ENABLE;
6826
6827                 /* Get SSC going before enabling the outputs */
6828                 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6829                 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6830                 udelay(200);
6831
6832                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6833
6834                 /* Enable CPU source on CPU attached eDP */
6835                 if (has_cpu_edp) {
6836                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6837                                 drm_dbg_kms(&dev_priv->drm,
6838                                             "Using SSC on eDP\n");
6839                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6840                         } else
6841                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6842                 } else
6843                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6844
6845                 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6846                 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6847                 udelay(200);
6848         } else {
6849                 drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
6850
6851                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6852
6853                 /* Turn off CPU output */
6854                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6855
6856                 intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6857                 intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6858                 udelay(200);
6859
6860                 if (!using_ssc_source) {
6861                         drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
6862
6863                         /* Turn off the SSC source */
6864                         val &= ~DREF_SSC_SOURCE_MASK;
6865                         val |= DREF_SSC_SOURCE_DISABLE;
6866
6867                         /* Turn off SSC1 */
6868                         val &= ~DREF_SSC1_ENABLE;
6869
6870                         intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
6871                         intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
6872                         udelay(200);
6873                 }
6874         }
6875
6876         BUG_ON(val != final);
6877 }
6878
6879 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
6880 {
6881         u32 tmp;
6882
6883         tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
6884         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6885         intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
6886
6887         if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
6888                         FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6889                 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
6890
6891         tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
6892         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6893         intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
6894
6895         if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
6896                          FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
6897                 drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
6898 }
6899
6900 /* WaMPhyProgramming:hsw */
6901 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6902 {
6903         u32 tmp;
6904
6905         tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6906         tmp &= ~(0xFF << 24);
6907         tmp |= (0x12 << 24);
6908         intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6909
6910         tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6911         tmp |= (1 << 11);
6912         intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6913
6914         tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6915         tmp |= (1 << 11);
6916         intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6917
6918         tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6919         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6920         intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6921
6922         tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6923         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6924         intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6925
6926         tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6927         tmp &= ~(7 << 13);
6928         tmp |= (5 << 13);
6929         intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6930
6931         tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6932         tmp &= ~(7 << 13);
6933         tmp |= (5 << 13);
6934         intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6935
6936         tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6937         tmp &= ~0xFF;
6938         tmp |= 0x1C;
6939         intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6940
6941         tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6942         tmp &= ~0xFF;
6943         tmp |= 0x1C;
6944         intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6945
6946         tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6947         tmp &= ~(0xFF << 16);
6948         tmp |= (0x1C << 16);
6949         intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6950
6951         tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6952         tmp &= ~(0xFF << 16);
6953         tmp |= (0x1C << 16);
6954         intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6955
6956         tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
6957         tmp |= (1 << 27);
6958         intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
6959
6960         tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
6961         tmp |= (1 << 27);
6962         intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
6963
6964         tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
6965         tmp &= ~(0xF << 28);
6966         tmp |= (4 << 28);
6967         intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
6968
6969         tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
6970         tmp &= ~(0xF << 28);
6971         tmp |= (4 << 28);
6972         intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
6973 }
6974
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize the flag combination: FDI always requires downspread,
	 * and the LP PCH variant has no FDI at all. */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep PATHALT set for now; the 24us
	 * delay lets the clock settle before any further changes. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clearing PATHALT switches to the spread clock path
		 * (cf. the reverse order in lpt_disable_clkout_dp()). */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		/* FDI I/O configuration must happen with spread enabled. */
		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable control lives at SBI_GEN0 on LP PCHs and at
	 * SBI_DBUFF0 otherwise; both use the same bit definition. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
7020
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* The buffer-enable control lives at SBI_GEN0 on LP PCHs and at
	 * SBI_DBUFF0 otherwise; both use the same bit definition. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Set PATHALT first and let the clock settle for 32us
		 * before disabling the SSC block - the exact reverse of
		 * the enable sequence in lpt_enable_clkout_dp(). */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
7046
/* Convert a bend step count (-50..50, in steps of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values for each supported bend amount, indexed
 * via BEND_IDX() (entry 0 corresponds to -50 steps, entry 20 to +50).
 * Adjacent step pairs share a divider value; the intermediate odd
 * multiples of 5 are produced by the SSCDITHPHASE dithering set up in
 * lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
7072
7073 /*
7074  * Bend CLKOUT_DP
7075  * steps -50 to 50 inclusive, in steps of 5
7076  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
7077  * change in clock period = -(steps / 10) * 5.787 ps
7078  */
7079 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
7080 {
7081         u32 tmp;
7082         int idx = BEND_IDX(steps);
7083
7084         if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
7085                 return;
7086
7087         if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
7088                 return;
7089
7090         mutex_lock(&dev_priv->sb_lock);
7091
7092         if (steps % 10 != 0)
7093                 tmp = 0xAAAAAAAB;
7094         else
7095                 tmp = 0x00000000;
7096         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
7097
7098         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
7099         tmp &= 0xffff0000;
7100         tmp |= sscdivintphase[idx];
7101         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
7102
7103         mutex_unlock(&dev_priv->sb_lock);
7104 }
7105
7106 #undef BEND_IDX
7107
7108 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
7109 {
7110         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
7111         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
7112
7113         if ((ctl & SPLL_PLL_ENABLE) == 0)
7114                 return false;
7115
7116         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
7117             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
7118                 return true;
7119
7120         if (IS_BROADWELL(dev_priv) &&
7121             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
7122                 return true;
7123
7124         return false;
7125 }
7126
7127 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
7128                                enum intel_dpll_id id)
7129 {
7130         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
7131         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
7132
7133         if ((ctl & WRPLL_PLL_ENABLE) == 0)
7134                 return false;
7135
7136         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
7137                 return true;
7138
7139         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
7140             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
7141             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
7142                 return true;
7143
7144         return false;
7145 }
7146
7147 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
7148 {
7149         struct intel_encoder *encoder;
7150         bool has_fdi = false;
7151
7152         for_each_intel_encoder(&dev_priv->drm, encoder) {
7153                 switch (encoder->type) {
7154                 case INTEL_OUTPUT_ANALOG:
7155                         has_fdi = true;
7156                         break;
7157                 default:
7158                         break;
7159                 }
7160         }
7161
7162         /*
7163          * The BIOS may have decided to use the PCH SSC
7164          * reference so we must not disable it until the
7165          * relevant PLLs have stopped relying on it. We'll
7166          * just leave the PCH SSC reference enabled in case
7167          * any active PLL is using it. It will get disabled
7168          * after runtime suspend if we don't have FDI.
7169          *
7170          * TODO: Move the whole reference clock handling
7171          * to the modeset sequence proper so that we can
7172          * actually enable/disable/reconfigure these things
7173          * safely. To do that we need to introduce a real
7174          * clock hierarchy. That would also allow us to do
7175          * clock bending finally.
7176          */
7177         dev_priv->pch_ssc_use = 0;
7178
7179         if (spll_uses_pch_ssc(dev_priv)) {
7180                 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
7181                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
7182         }
7183
7184         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
7185                 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
7186                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
7187         }
7188
7189         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
7190                 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
7191                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
7192         }
7193
7194         if (dev_priv->pch_ssc_use)
7195                 return;
7196
7197         if (has_fdi) {
7198                 lpt_bend_clkout_dp(dev_priv, 0);
7199                 lpt_enable_clkout_dp(dev_priv, true, true);
7200         } else {
7201                 lpt_disable_clkout_dp(dev_priv);
7202         }
7203 }
7204
7205 /*
7206  * Initialize reference clocks when the driver loads
7207  */
7208 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
7209 {
7210         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
7211                 ilk_init_pch_refclk(dev_priv);
7212         else if (HAS_PCH_LPT(dev_priv))
7213                 lpt_init_pch_refclk(dev_priv);
7214 }
7215
/* Program the PIPECONF register from the committed crtc state. */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	/* Pipe bits-per-component (bpp is the sum of three components). */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* NOTE(review): SDVO outputs are excluded from the pipe-level
	 * limited range bit - presumably range compression is handled
	 * elsewhere for SDVO; confirm against the SDVO encoder code. */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	/* Any non-RGB output format is programmed as BT.709 YCbCr here. */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* Hardware counts the frame start delay from 1, software from 0. */
	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
7272
7273 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
7274 {
7275         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7276         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7277         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7278         u32 val = 0;
7279
7280         if (IS_HASWELL(dev_priv) && crtc_state->dither)
7281                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7282
7283         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
7284                 val |= PIPECONF_INTERLACED_ILK;
7285         else
7286                 val |= PIPECONF_PROGRESSIVE;
7287
7288         if (IS_HASWELL(dev_priv) &&
7289             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
7290                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
7291
7292         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
7293         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
7294 }
7295
/* Program the PIPEMISC register (BDW+) from the committed crtc state. */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	/* Dither bits-per-component; must match the pipe bpp. */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* YCbCr 4:4:4 and 4:2:0 both need the YUV colorspace bit ... */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* ... and 4:2:0 additionally needs the downsampler enabled. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/* On gen11+, enable HDR precision mode when only HDR-capable
	 * planes (plus the cursor) are active. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
7341
7342 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
7343 {
7344         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7345         u32 tmp;
7346
7347         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
7348
7349         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
7350         case PIPEMISC_DITHER_6_BPC:
7351                 return 18;
7352         case PIPEMISC_DITHER_8_BPC:
7353                 return 24;
7354         case PIPEMISC_DITHER_10_BPC:
7355                 return 30;
7356         case PIPEMISC_DITHER_12_BPC:
7357                 return 36;
7358         default:
7359                 MISSING_CASE(tmp);
7360                 return 0;
7361         }
7362 }
7363
/*
 * Compute how many FDI lanes are needed to carry @target_clock pixels
 * at @bpp bits each over a link running at @link_bw.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bps = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + lane_bps - 1) / lane_bps;
}
7374
7375 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7376                                          struct intel_link_m_n *m_n)
7377 {
7378         struct drm_device *dev = crtc->base.dev;
7379         struct drm_i915_private *dev_priv = to_i915(dev);
7380         enum pipe pipe = crtc->pipe;
7381
7382         m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
7383         m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
7384         m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
7385                 & ~TU_SIZE_MASK;
7386         m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
7387         m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
7388                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7389 }
7390
7391 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7392                                          enum transcoder transcoder,
7393                                          struct intel_link_m_n *m_n,
7394                                          struct intel_link_m_n *m2_n2)
7395 {
7396         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7397         enum pipe pipe = crtc->pipe;
7398
7399         if (INTEL_GEN(dev_priv) >= 5) {
7400                 m_n->link_m = intel_de_read(dev_priv,
7401                                             PIPE_LINK_M1(transcoder));
7402                 m_n->link_n = intel_de_read(dev_priv,
7403                                             PIPE_LINK_N1(transcoder));
7404                 m_n->gmch_m = intel_de_read(dev_priv,
7405                                             PIPE_DATA_M1(transcoder))
7406                         & ~TU_SIZE_MASK;
7407                 m_n->gmch_n = intel_de_read(dev_priv,
7408                                             PIPE_DATA_N1(transcoder));
7409                 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
7410                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7411
7412                 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
7413                         m2_n2->link_m = intel_de_read(dev_priv,
7414                                                       PIPE_LINK_M2(transcoder));
7415                         m2_n2->link_n = intel_de_read(dev_priv,
7416                                                              PIPE_LINK_N2(transcoder));
7417                         m2_n2->gmch_m = intel_de_read(dev_priv,
7418                                                              PIPE_DATA_M2(transcoder))
7419                                         & ~TU_SIZE_MASK;
7420                         m2_n2->gmch_n = intel_de_read(dev_priv,
7421                                                              PIPE_DATA_N2(transcoder));
7422                         m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
7423                                         & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7424                 }
7425         } else {
7426                 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
7427                 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
7428                 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
7429                         & ~TU_SIZE_MASK;
7430                 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
7431                 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
7432                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7433         }
7434 }
7435
7436 void intel_dp_get_m_n(struct intel_crtc *crtc,
7437                       struct intel_crtc_state *pipe_config)
7438 {
7439         if (pipe_config->has_pch_encoder)
7440                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7441         else
7442                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7443                                              &pipe_config->dp_m_n,
7444                                              &pipe_config->dp_m2_n2);
7445 }
7446
/* Read back the FDI M/N values; FDI has no alternate M2/N2 set. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
7453
7454 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
7455                                   u32 pos, u32 size)
7456 {
7457         drm_rect_init(&crtc_state->pch_pfit.dst,
7458                       pos >> 16, pos & 0xffff,
7459                       size >> 16, size & 0xffff);
7460 }
7461
7462 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
7463 {
7464         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7465         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7466         struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
7467         int id = -1;
7468         int i;
7469
7470         /* find scaler attached to this pipe */
7471         for (i = 0; i < crtc->num_scalers; i++) {
7472                 u32 ctl, pos, size;
7473
7474                 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
7475                 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
7476                         continue;
7477
7478                 id = i;
7479                 crtc_state->pch_pfit.enabled = true;
7480
7481                 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
7482                 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
7483
7484                 ilk_get_pfit_pos_size(crtc_state, pos, size);
7485
7486                 scaler_state->scalers[i].in_use = true;
7487                 break;
7488         }
7489
7490         scaler_state->scaler_id = id;
7491         if (id >= 0)
7492                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
7493         else
7494                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
7495 }
7496
7497 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
7498 {
7499         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7500         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7501         u32 ctl, pos, size;
7502
7503         ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
7504         if ((ctl & PF_ENABLE) == 0)
7505                 return;
7506
7507         crtc_state->pch_pfit.enabled = true;
7508
7509         pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
7510         size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
7511
7512         ilk_get_pfit_pos_size(crtc_state, pos, size);
7513
7514         /*
7515          * We currently do not free assignements of panel fitters on
7516          * ivb/hsw (since we don't use the higher upscaling modes which
7517          * differentiates them) so just WARN about this case for now.
7518          */
7519         drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
7520                     (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
7521 }
7522
7523 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
7524                                 struct intel_crtc_state *pipe_config)
7525 {
7526         struct drm_device *dev = crtc->base.dev;
7527         struct drm_i915_private *dev_priv = to_i915(dev);
7528         enum intel_display_power_domain power_domain;
7529         intel_wakeref_t wakeref;
7530         u32 tmp;
7531         bool ret;
7532
7533         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
7534         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
7535         if (!wakeref)
7536                 return false;
7537
7538         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7539         pipe_config->shared_dpll = NULL;
7540
7541         ret = false;
7542         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
7543         if (!(tmp & PIPECONF_ENABLE))
7544                 goto out;
7545
7546         switch (tmp & PIPECONF_BPC_MASK) {
7547         case PIPECONF_6BPC:
7548                 pipe_config->pipe_bpp = 18;
7549                 break;
7550         case PIPECONF_8BPC:
7551                 pipe_config->pipe_bpp = 24;
7552                 break;
7553         case PIPECONF_10BPC:
7554                 pipe_config->pipe_bpp = 30;
7555                 break;
7556         case PIPECONF_12BPC:
7557                 pipe_config->pipe_bpp = 36;
7558                 break;
7559         default:
7560                 break;
7561         }
7562
7563         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7564                 pipe_config->limited_color_range = true;
7565
7566         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
7567         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
7568         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
7569                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
7570                 break;
7571         default:
7572                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
7573                 break;
7574         }
7575
7576         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
7577                 PIPECONF_GAMMA_MODE_SHIFT;
7578
7579         pipe_config->csc_mode = intel_de_read(dev_priv,
7580                                               PIPE_CSC_MODE(crtc->pipe));
7581
7582         i9xx_get_pipe_color_config(pipe_config);
7583         intel_color_get_config(pipe_config);
7584
7585         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7586                 struct intel_shared_dpll *pll;
7587                 enum intel_dpll_id pll_id;
7588                 bool pll_active;
7589
7590                 pipe_config->has_pch_encoder = true;
7591
7592                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
7593                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7594                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
7595
7596                 ilk_get_fdi_m_n_config(crtc, pipe_config);
7597
7598                 if (HAS_PCH_IBX(dev_priv)) {
7599                         /*
7600                          * The pipe->pch transcoder and pch transcoder->pll
7601                          * mapping is fixed.
7602                          */
7603                         pll_id = (enum intel_dpll_id) crtc->pipe;
7604                 } else {
7605                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
7606                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7607                                 pll_id = DPLL_ID_PCH_PLL_B;
7608                         else
7609                                 pll_id= DPLL_ID_PCH_PLL_A;
7610                 }
7611
7612                 pipe_config->shared_dpll =
7613                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
7614                 pll = pipe_config->shared_dpll;
7615
7616                 pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7617                                                      &pipe_config->dpll_hw_state);
7618                 drm_WARN_ON(dev, !pll_active);
7619
7620                 tmp = pipe_config->dpll_hw_state.dpll;
7621                 pipe_config->pixel_multiplier =
7622                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7623                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
7624
7625                 ilk_pch_clock_get(crtc, pipe_config);
7626         } else {
7627                 pipe_config->pixel_multiplier = 1;
7628         }
7629
7630         intel_get_transcoder_timings(crtc, pipe_config);
7631         intel_get_pipe_src_size(crtc, pipe_config);
7632
7633         ilk_get_pfit_config(pipe_config);
7634
7635         ret = true;
7636
7637 out:
7638         intel_display_power_put(dev_priv, power_domain, wakeref);
7639
7640         return ret;
7641 }
7642
7643 static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7644                             struct intel_crtc_state *pipe_config)
7645 {
7646         enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
7647         enum phy phy = intel_port_to_phy(dev_priv, port);
7648         struct icl_port_dpll *port_dpll;
7649         struct intel_shared_dpll *pll;
7650         enum intel_dpll_id id;
7651         bool pll_active;
7652         u32 clk_sel;
7653
7654         clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
7655         id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);
7656
7657         if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
7658                 return;
7659
7660         pll = intel_get_shared_dpll_by_id(dev_priv, id);
7661         port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
7662
7663         port_dpll->pll = pll;
7664         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7665                                              &port_dpll->hw_state);
7666         drm_WARN_ON(&dev_priv->drm, !pll_active);
7667
7668         icl_set_active_port_dpll(pipe_config, port_dpll_id);
7669 }
7670
/*
 * Read out which shared DPLL (combo PHY, MG/Type-C or TBT) is clocking DDI
 * @port on ICL+ and record it in @pipe_config as the active port DPLL.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	i915_reg_t reg;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		u32 mask, shift;

		/* The clock-select register/bitfield layout varies by platform. */
		if (IS_ALDERLAKE_S(dev_priv)) {
			reg = ADLS_DPCLKA_CFGCR(phy);
			mask = ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy);
			shift = ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy);
		} else if (IS_ROCKETLAKE(dev_priv)) {
			reg = ICL_DPCLKA_CFGCR0;
			mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		} else {
			reg = ICL_DPCLKA_CFGCR0;
			mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		}

		temp = intel_de_read(dev_priv, reg) & mask;
		id = temp >> shift;
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		/* Type-C ports use either the MG PHY PLL or the TBT PLL. */
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	/* An active DDI must be fed by an enabled PLL. */
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
7731
7732 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7733                             struct intel_crtc_state *pipe_config)
7734 {
7735         struct intel_shared_dpll *pll;
7736         enum intel_dpll_id id;
7737         bool pll_active;
7738         u32 temp;
7739
7740         temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
7741         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
7742
7743         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
7744                 return;
7745
7746         pll = intel_get_shared_dpll_by_id(dev_priv, id);
7747
7748         pipe_config->shared_dpll = pll;
7749         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7750                                              &pipe_config->dpll_hw_state);
7751         drm_WARN_ON(&dev_priv->drm, !pll_active);
7752 }
7753
7754 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
7755                                 enum port port,
7756                                 struct intel_crtc_state *pipe_config)
7757 {
7758         struct intel_shared_dpll *pll;
7759         enum intel_dpll_id id;
7760         bool pll_active;
7761
7762         switch (port) {
7763         case PORT_A:
7764                 id = DPLL_ID_SKL_DPLL0;
7765                 break;
7766         case PORT_B:
7767                 id = DPLL_ID_SKL_DPLL1;
7768                 break;
7769         case PORT_C:
7770                 id = DPLL_ID_SKL_DPLL2;
7771                 break;
7772         default:
7773                 drm_err(&dev_priv->drm, "Incorrect port type\n");
7774                 return;
7775         }
7776
7777         pll = intel_get_shared_dpll_by_id(dev_priv, id);
7778
7779         pipe_config->shared_dpll = pll;
7780         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7781                                              &pipe_config->dpll_hw_state);
7782         drm_WARN_ON(&dev_priv->drm, !pll_active);
7783 }
7784
7785 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7786                             struct intel_crtc_state *pipe_config)
7787 {
7788         struct intel_shared_dpll *pll;
7789         enum intel_dpll_id id;
7790         bool pll_active;
7791         u32 temp;
7792
7793         temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
7794         id = temp >> (port * 3 + 1);
7795
7796         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
7797                 return;
7798
7799         pll = intel_get_shared_dpll_by_id(dev_priv, id);
7800
7801         pipe_config->shared_dpll = pll;
7802         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7803                                              &pipe_config->dpll_hw_state);
7804         drm_WARN_ON(&dev_priv->drm, !pll_active);
7805 }
7806
7807 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
7808                             struct intel_crtc_state *pipe_config)
7809 {
7810         struct intel_shared_dpll *pll;
7811         enum intel_dpll_id id;
7812         u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
7813         bool pll_active;
7814
7815         switch (ddi_pll_sel) {
7816         case PORT_CLK_SEL_WRPLL1:
7817                 id = DPLL_ID_WRPLL1;
7818                 break;
7819         case PORT_CLK_SEL_WRPLL2:
7820                 id = DPLL_ID_WRPLL2;
7821                 break;
7822         case PORT_CLK_SEL_SPLL:
7823                 id = DPLL_ID_SPLL;
7824                 break;
7825         case PORT_CLK_SEL_LCPLL_810:
7826                 id = DPLL_ID_LCPLL_810;
7827                 break;
7828         case PORT_CLK_SEL_LCPLL_1350:
7829                 id = DPLL_ID_LCPLL_1350;
7830                 break;
7831         case PORT_CLK_SEL_LCPLL_2700:
7832                 id = DPLL_ID_LCPLL_2700;
7833                 break;
7834         default:
7835                 MISSING_CASE(ddi_pll_sel);
7836                 fallthrough;
7837         case PORT_CLK_SEL_NONE:
7838                 return;
7839         }
7840
7841         pll = intel_get_shared_dpll_by_id(dev_priv, id);
7842
7843         pipe_config->shared_dpll = pll;
7844         pll_active = intel_dpll_get_hw_state(dev_priv, pll,
7845                                              &pipe_config->dpll_hw_state);
7846         drm_WARN_ON(&dev_priv->drm, !pll_active);
7847 }
7848
/*
 * Determine which CPU transcoder (fixed pipe transcoder, eDP or DSI) is
 * driving @crtc, grab the transcoder power domain into @power_domain_set,
 * and return whether that transcoder's pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe the panel transcoder is attached to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	/* Transcoder powered down -> report the pipe as inactive. */
	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
7936
/*
 * Check whether a BXT/GLK DSI transcoder is driving @crtc. On success the
 * DSI transcoder is recorded in @pipe_config->cpu_transcoder and its power
 * domain is grabbed into @power_domain_set. Returns true if a DSI
 * transcoder was found attached to this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Fixed port->DSI-transcoder mapping on BXT. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip DSI ports routed to a different pipe. */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
7982
/*
 * Figure out which DDI port (and hence which DPLL) is feeding the already
 * determined CPU transcoder, and read out the PLL and (pre-gen9) FDI state
 * into @pipe_config.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders have a fixed port mapping. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		/* The port field encoding changed on gen12. */
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Dispatch to the platform-specific DPLL readout. */
	if (IS_DG1(dev_priv))
		dg1_get_ddi_pll(dev_priv, port, pipe_config);
	else if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
8034
/*
 * Read out the current hardware state of @crtc's pipe into @pipe_config on
 * HSW+ (DDI) platforms. Returns true if the pipe is active and the state
 * was read out, false otherwise. All power domains grabbed along the way
 * are released before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* A pipe is driven either by a regular/eDP transcoder or by DSI. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* Output colorspace readout differs between HSW and BDW+. */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	/* Gamma/CSC enable bits moved to SKL_BOTTOM_COLOR on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* The panel fitter has its own power domain; read it only if powered. */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
8158
8159 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
8160 {
8161         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8162         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8163
8164         if (!i915->display.get_pipe_config(crtc, crtc_state))
8165                 return false;
8166
8167         crtc_state->hw.active = true;
8168
8169         intel_crtc_readout_derived_state(crtc_state);
8170
8171         return true;
8172 }
8173
/*
 * VESA 640x480x72Hz mode to set on the pipe during forced load detection
 * (e.g. probing for a connected analog display).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
8179
8180 struct drm_framebuffer *
8181 intel_framebuffer_create(struct drm_i915_gem_object *obj,
8182                          struct drm_mode_fb_cmd2 *mode_cmd)
8183 {
8184         struct intel_framebuffer *intel_fb;
8185         int ret;
8186
8187         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8188         if (!intel_fb)
8189                 return ERR_PTR(-ENOMEM);
8190
8191         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
8192         if (ret)
8193                 goto err;
8194
8195         return &intel_fb->base;
8196
8197 err:
8198         kfree(intel_fb);
8199         return ERR_PTR(ret);
8200 }
8201
8202 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
8203                                         struct drm_crtc *crtc)
8204 {
8205         struct drm_plane *plane;
8206         struct drm_plane_state *plane_state;
8207         int ret, i;
8208
8209         ret = drm_atomic_add_affected_planes(state, crtc);
8210         if (ret)
8211                 return ret;
8212
8213         for_each_new_plane_in_state(state, plane, plane_state, i) {
8214                 if (plane_state->crtc != crtc)
8215                         continue;
8216
8217                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
8218                 if (ret)
8219                         return ret;
8220
8221                 drm_atomic_set_fb_for_plane(plane_state, NULL);
8222         }
8223
8224         return 0;
8225 }
8226
8227 int intel_get_load_detect_pipe(struct drm_connector *connector,
8228                                struct intel_load_detect_pipe *old,
8229                                struct drm_modeset_acquire_ctx *ctx)
8230 {
8231         struct intel_crtc *intel_crtc;
8232         struct intel_encoder *intel_encoder =
8233                 intel_attached_encoder(to_intel_connector(connector));
8234         struct drm_crtc *possible_crtc;
8235         struct drm_encoder *encoder = &intel_encoder->base;
8236         struct drm_crtc *crtc = NULL;
8237         struct drm_device *dev = encoder->dev;
8238         struct drm_i915_private *dev_priv = to_i915(dev);
8239         struct drm_mode_config *config = &dev->mode_config;
8240         struct drm_atomic_state *state = NULL, *restore_state = NULL;
8241         struct drm_connector_state *connector_state;
8242         struct intel_crtc_state *crtc_state;
8243         int ret, i = -1;
8244
8245         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8246                     connector->base.id, connector->name,
8247                     encoder->base.id, encoder->name);
8248
8249         old->restore_state = NULL;
8250
8251         drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
8252
8253         /*
8254          * Algorithm gets a little messy:
8255          *
8256          *   - if the connector already has an assigned crtc, use it (but make
8257          *     sure it's on first)
8258          *
8259          *   - try to find the first unused crtc that can drive this connector,
8260          *     and use that if we find one
8261          */
8262
8263         /* See if we already have a CRTC for this connector */
8264         if (connector->state->crtc) {
8265                 crtc = connector->state->crtc;
8266
8267                 ret = drm_modeset_lock(&crtc->mutex, ctx);
8268                 if (ret)
8269                         goto fail;
8270
8271                 /* Make sure the crtc and connector are running */
8272                 goto found;
8273         }
8274
8275         /* Find an unused one (if possible) */
8276         for_each_crtc(dev, possible_crtc) {
8277                 i++;
8278                 if (!(encoder->possible_crtcs & (1 << i)))
8279                         continue;
8280
8281                 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
8282                 if (ret)
8283                         goto fail;
8284
8285                 if (possible_crtc->state->enable) {
8286                         drm_modeset_unlock(&possible_crtc->mutex);
8287                         continue;
8288                 }
8289
8290                 crtc = possible_crtc;
8291                 break;
8292         }
8293
8294         /*
8295          * If we didn't find an unused CRTC, don't use any.
8296          */
8297         if (!crtc) {
8298                 drm_dbg_kms(&dev_priv->drm,
8299                             "no pipe available for load-detect\n");
8300                 ret = -ENODEV;
8301                 goto fail;
8302         }
8303
8304 found:
8305         intel_crtc = to_intel_crtc(crtc);
8306
8307         state = drm_atomic_state_alloc(dev);
8308         restore_state = drm_atomic_state_alloc(dev);
8309         if (!state || !restore_state) {
8310                 ret = -ENOMEM;
8311                 goto fail;
8312         }
8313
8314         state->acquire_ctx = ctx;
8315         restore_state->acquire_ctx = ctx;
8316
8317         connector_state = drm_atomic_get_connector_state(state, connector);
8318         if (IS_ERR(connector_state)) {
8319                 ret = PTR_ERR(connector_state);
8320                 goto fail;
8321         }
8322
8323         ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
8324         if (ret)
8325                 goto fail;
8326
8327         crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
8328         if (IS_ERR(crtc_state)) {
8329                 ret = PTR_ERR(crtc_state);
8330                 goto fail;
8331         }
8332
8333         crtc_state->uapi.active = true;
8334
8335         ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
8336                                            &load_detect_mode);
8337         if (ret)
8338                 goto fail;
8339
8340         ret = intel_modeset_disable_planes(state, crtc);
8341         if (ret)
8342                 goto fail;
8343
8344         ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
8345         if (!ret)
8346                 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
8347         if (!ret)
8348                 ret = drm_atomic_add_affected_planes(restore_state, crtc);
8349         if (ret) {
8350                 drm_dbg_kms(&dev_priv->drm,
8351                             "Failed to create a copy of old state to restore: %i\n",
8352                             ret);
8353                 goto fail;
8354         }
8355
8356         ret = drm_atomic_commit(state);
8357         if (ret) {
8358                 drm_dbg_kms(&dev_priv->drm,
8359                             "failed to set mode on load-detect pipe\n");
8360                 goto fail;
8361         }
8362
8363         old->restore_state = restore_state;
8364         drm_atomic_state_put(state);
8365
8366         /* let the connector get through one full cycle before testing */
8367         intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
8368         return true;
8369
8370 fail:
8371         if (state) {
8372                 drm_atomic_state_put(state);
8373                 state = NULL;
8374         }
8375         if (restore_state) {
8376                 drm_atomic_state_put(restore_state);
8377                 restore_state = NULL;
8378         }
8379
8380         if (ret == -EDEADLK)
8381                 return ret;
8382
8383         return false;
8384 }
8385
8386 void intel_release_load_detect_pipe(struct drm_connector *connector,
8387                                     struct intel_load_detect_pipe *old,
8388                                     struct drm_modeset_acquire_ctx *ctx)
8389 {
8390         struct intel_encoder *intel_encoder =
8391                 intel_attached_encoder(to_intel_connector(connector));
8392         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
8393         struct drm_encoder *encoder = &intel_encoder->base;
8394         struct drm_atomic_state *state = old->restore_state;
8395         int ret;
8396
8397         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8398                     connector->base.id, connector->name,
8399                     encoder->base.id, encoder->name);
8400
8401         if (!state)
8402                 return;
8403
8404         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
8405         if (ret)
8406                 drm_dbg_kms(&i915->drm,
8407                             "Couldn't release load detect pipe: %i\n", ret);
8408         drm_atomic_state_put(state);
8409 }
8410
8411 static int i9xx_pll_refclk(struct drm_device *dev,
8412                            const struct intel_crtc_state *pipe_config)
8413 {
8414         struct drm_i915_private *dev_priv = to_i915(dev);
8415         u32 dpll = pipe_config->dpll_hw_state.dpll;
8416
8417         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8418                 return dev_priv->vbt.lvds_ssc_freq;
8419         else if (HAS_PCH_SPLIT(dev_priv))
8420                 return 120000;
8421         else if (!IS_GEN(dev_priv, 2))
8422                 return 96000;
8423         else
8424                 return 48000;
8425 }
8426
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP divisor register the DPLL is currently sourcing from. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M1/N/M2; Pineview uses a different field layout for N/M2. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is encoded as a one-hot bitfield here, hence ffs(). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL mode (DAC serial vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Unrecognized mode: leave port_clock untouched. */
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/*
		 * Gen2 path: i830 has no LVDS register to read; otherwise
		 * LVDS can only be active on pipe B (pipe == 1).
		 */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* Dual-channel LVDS (CLKB powered) implies p2 = 7. */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
8518
8519 int intel_dotclock_calculate(int link_freq,
8520                              const struct intel_link_m_n *m_n)
8521 {
8522         /*
8523          * The calculation for the data clock is:
8524          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8525          * But we want to avoid losing precison if possible, so:
8526          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8527          *
8528          * and the link clock is simpler:
8529          * link_clock = (m * link_clock) / n
8530          */
8531
8532         if (!m_n->link_n)
8533                 return 0;
8534
8535         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
8536 }
8537
/*
 * Read out the PCH-side clock state for @crtc into @pipe_config:
 * port_clock from the DPLL, and a fallback dotclock derived from the
 * FDI M/N configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
8555
8556 /* Returns the currently programmed mode of the given encoder. */
8557 struct drm_display_mode *
8558 intel_encoder_current_mode(struct intel_encoder *encoder)
8559 {
8560         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
8561         struct intel_crtc_state *crtc_state;
8562         struct drm_display_mode *mode;
8563         struct intel_crtc *crtc;
8564         enum pipe pipe;
8565
8566         if (!encoder->get_hw_state(encoder, &pipe))
8567                 return NULL;
8568
8569         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8570
8571         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8572         if (!mode)
8573                 return NULL;
8574
8575         crtc_state = intel_crtc_state_alloc(crtc);
8576         if (!crtc_state) {
8577                 kfree(mode);
8578                 return NULL;
8579         }
8580
8581         if (!intel_crtc_get_pipe_config(crtc_state)) {
8582                 kfree(crtc_state);
8583                 kfree(mode);
8584                 return NULL;
8585         }
8586
8587         intel_encoder_get_config(encoder, crtc_state);
8588
8589         intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
8590
8591         kfree(crtc_state);
8592
8593         return mode;
8594 }
8595
8596 /**
8597  * intel_wm_need_update - Check whether watermarks need updating
8598  * @cur: current plane state
8599  * @new: new plane state
8600  *
8601  * Check current plane state versus the new one to determine whether
8602  * watermarks need to be recalculated.
8603  *
8604  * Returns true or false.
8605  */
8606 static bool intel_wm_need_update(const struct intel_plane_state *cur,
8607                                  struct intel_plane_state *new)
8608 {
8609         /* Update watermarks on tiling or size changes. */
8610         if (new->uapi.visible != cur->uapi.visible)
8611                 return true;
8612
8613         if (!cur->hw.fb || !new->hw.fb)
8614                 return false;
8615
8616         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
8617             cur->hw.rotation != new->hw.rotation ||
8618             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
8619             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
8620             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
8621             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
8622                 return true;
8623
8624         return false;
8625 }
8626
8627 static bool needs_scaling(const struct intel_plane_state *state)
8628 {
8629         int src_w = drm_rect_width(&state->uapi.src) >> 16;
8630         int src_h = drm_rect_height(&state->uapi.src) >> 16;
8631         int dst_w = drm_rect_width(&state->uapi.dst);
8632         int dst_h = drm_rect_height(&state->uapi.dst);
8633
8634         return (src_w != dst_w || src_h != dst_h);
8635 }
8636
/**
 * intel_plane_atomic_calc_changes - compute derived crtc state for a plane update
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state, updated in place
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state
 *
 * Based on the old and new plane/crtc state, work out which watermark
 * updates, cxsr/LP-watermark disables, scaler updates and frontbuffer
 * bits this commit needs, and set the corresponding flags in
 * @crtc_state.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* gen9+ non-cursor planes may need a pipe scaler allocated/freed. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane can't have been visible on a crtc that wasn't enabled. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing more to derive. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
8758
8759 static bool encoders_cloneable(const struct intel_encoder *a,
8760                                const struct intel_encoder *b)
8761 {
8762         /* masks could be asymmetric, so check both ways */
8763         return a == b || (a->cloneable & (1 << b->type) &&
8764                           b->cloneable & (1 << a->type));
8765 }
8766
/*
 * Check that @encoder can be cloned with every other encoder the atomic
 * state attaches to @crtc.  Returns true when all pairs are cloneable.
 */
static bool check_single_encoder_cloning(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		/* Only consider connectors driven by this crtc. */
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
8788
/*
 * Pull the linked (planar partner) plane of every planar plane in
 * @state into the atomic state, so both halves of a Y/UV pair are
 * always committed together.  Also sanity-check link symmetry.
 * Returns 0 or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* The link must point back, and exactly one side is the slave. */
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
8813
/*
 * (Re)assign Y planes to the planar (NV12 etc.) planes on @crtc_state.
 * Gen11+ scans out planar formats via two hardware planes; this tears
 * down all stale plane links, then pairs each planar plane with a free
 * Y-capable plane and copies the relevant parameters to the slave.
 * Returns 0, or -EINVAL when not enough free Y planes are available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar planes on this crtc: nothing left to link up. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes pair with a specific chroma-upsampler plane. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
8913
8914 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
8915 {
8916         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
8917         struct intel_atomic_state *state =
8918                 to_intel_atomic_state(new_crtc_state->uapi.state);
8919         const struct intel_crtc_state *old_crtc_state =
8920                 intel_atomic_get_old_crtc_state(state, crtc);
8921
8922         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
8923 }
8924
8925 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
8926 {
8927         const struct drm_display_mode *pipe_mode =
8928                 &crtc_state->hw.pipe_mode;
8929         int linetime_wm;
8930
8931         if (!crtc_state->hw.enable)
8932                 return 0;
8933
8934         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
8935                                         pipe_mode->crtc_clock);
8936
8937         return min(linetime_wm, 0x1ff);
8938 }
8939
8940 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
8941                                const struct intel_cdclk_state *cdclk_state)
8942 {
8943         const struct drm_display_mode *pipe_mode =
8944                 &crtc_state->hw.pipe_mode;
8945         int linetime_wm;
8946
8947         if (!crtc_state->hw.enable)
8948                 return 0;
8949
8950         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
8951                                         cdclk_state->logical.cdclk);
8952
8953         return min(linetime_wm, 0x1ff);
8954 }
8955
8956 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
8957 {
8958         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8959         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8960         const struct drm_display_mode *pipe_mode =
8961                 &crtc_state->hw.pipe_mode;
8962         int linetime_wm;
8963
8964         if (!crtc_state->hw.enable)
8965                 return 0;
8966
8967         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
8968                                    crtc_state->pixel_rate);
8969
8970         /* Display WA #1135: BXT:ALL GLK:ALL */
8971         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
8972                 linetime_wm /= 2;
8973
8974         return min(linetime_wm, 0x1ff);
8975 }
8976
/*
 * Compute the linetime and (on IPS-capable crtcs) ips_linetime
 * watermarks for @crtc and store them in the new crtc state.
 * Returns 0 on success or a negative error code.
 */
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_cdclk_state *cdclk_state;

	if (INTEL_GEN(dev_priv) >= 9)
		crtc_state->linetime = skl_linetime_wm(crtc_state);
	else
		crtc_state->linetime = hsw_linetime_wm(crtc_state);

	/* ips_linetime is only relevant for IPS-capable crtcs. */
	if (!hsw_crtc_supports_ips(crtc))
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
						       cdclk_state);

	return 0;
}
9002
/*
 * Per-crtc atomic check: computes derived state for @crtc — PLL clocks,
 * color management, watermarks, scalers, IPS, linetime and PSR2
 * selective fetch.  Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* A bigjoiner slave's clock is computed through its master. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !crtc_state->bigjoiner_slave &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate watermarks require optimized ones first. */
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	/* PSR2 selective fetch only applies to fastsets (no modeset). */
	if (!mode_changed) {
		ret = intel_psr2_sel_fetch_update(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}
9100
/*
 * Sync each connector's atomic state with the current (legacy) encoder/crtc
 * linkage, fixing up the connector reference counts to match. Used when
 * rebuilding atomic state from hardware readout.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/*
		 * Drop the reference held for the stale state->crtc link
		 * before rewriting the state below.
		 */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			/* Mirror the legacy encoder/crtc pointers into state. */
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* New state->crtc link: take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
9125
9126 static int
9127 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
9128                       struct intel_crtc_state *pipe_config)
9129 {
9130         struct drm_connector *connector = conn_state->connector;
9131         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
9132         const struct drm_display_info *info = &connector->display_info;
9133         int bpp;
9134
9135         switch (conn_state->max_bpc) {
9136         case 6 ... 7:
9137                 bpp = 6 * 3;
9138                 break;
9139         case 8 ... 9:
9140                 bpp = 8 * 3;
9141                 break;
9142         case 10 ... 11:
9143                 bpp = 10 * 3;
9144                 break;
9145         case 12 ... 16:
9146                 bpp = 12 * 3;
9147                 break;
9148         default:
9149                 MISSING_CASE(conn_state->max_bpc);
9150                 return -EINVAL;
9151         }
9152
9153         if (bpp < pipe_config->pipe_bpp) {
9154                 drm_dbg_kms(&i915->drm,
9155                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
9156                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
9157                             connector->base.id, connector->name,
9158                             bpp, 3 * info->bpc,
9159                             3 * conn_state->max_requested_bpc,
9160                             pipe_config->pipe_bpp);
9161
9162                 pipe_config->pipe_bpp = bpp;
9163         }
9164
9165         return 0;
9166 }
9167
9168 static int
9169 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9170                           struct intel_crtc_state *pipe_config)
9171 {
9172         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9173         struct drm_atomic_state *state = pipe_config->uapi.state;
9174         struct drm_connector *connector;
9175         struct drm_connector_state *connector_state;
9176         int bpp, i;
9177
9178         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9179             IS_CHERRYVIEW(dev_priv)))
9180                 bpp = 10*3;
9181         else if (INTEL_GEN(dev_priv) >= 5)
9182                 bpp = 12*3;
9183         else
9184                 bpp = 8*3;
9185
9186         pipe_config->pipe_bpp = bpp;
9187
9188         /* Clamp display bpp to connector max bpp */
9189         for_each_new_connector_in_state(state, connector, connector_state, i) {
9190                 int ret;
9191
9192                 if (connector_state->crtc != &crtc->base)
9193                         continue;
9194
9195                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
9196                 if (ret)
9197                         return ret;
9198         }
9199
9200         return 0;
9201 }
9202
/* Log the crtc_* (hardware) timing fields of a mode for KMS debugging. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
                                    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
9215
/*
 * Log a link M/N configuration (DP or FDI) for KMS debugging; @id labels
 * which link the values belong to (e.g. "fdi", "dp m_n", "dp m2_n2").
 */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
                      const char *id, unsigned int lane_count,
                      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
9229
9230 static void
9231 intel_dump_infoframe(struct drm_i915_private *dev_priv,
9232                      const union hdmi_infoframe *frame)
9233 {
9234         if (!drm_debug_enabled(DRM_UT_KMS))
9235                 return;
9236
9237         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
9238 }
9239
9240 static void
9241 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
9242                       const struct drm_dp_vsc_sdp *vsc)
9243 {
9244         if (!drm_debug_enabled(DRM_UT_KMS))
9245                 return;
9246
9247         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
9248 }
9249
/* Human-readable names for INTEL_OUTPUT_* values, indexed by output type. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
9268
/*
 * Format the set bits of @output_types into @buf as a comma-separated list
 * of INTEL_OUTPUT_* names. Output is silently truncated if @buf is too
 * small.
 */
static void snprintf_output_types(char *buf, size_t len,
                                  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* Prefix with "," for every entry after the first. */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/*
		 * snprintf returns the would-be length; r >= len means the
		 * buffer is full, so stop. NOTE(review): bits left set here
		 * will also trigger the WARN below — presumably acceptable
		 * for a debug helper.
		 */
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	/* Any remaining bits are output types without a name in the table. */
	WARN_ON_ONCE(output_types != 0);
}
9295
/* Human-readable names for enum intel_output_format values. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
9302
9303 static const char *output_formats(enum intel_output_format format)
9304 {
9305         if (format >= ARRAY_SIZE(output_format_str))
9306                 format = INTEL_OUTPUT_FORMAT_INVALID;
9307         return output_format_str[format];
9308 }
9309
/* Log one plane's fb, format, rotation/scaler and src/dst rects for debug. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No framebuffer attached: log a short line and bail. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src is 16.16 fixed point, dst is integer pixels. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
9339
/*
 * Dump an entire crtc state to the KMS debug log: output configuration,
 * link M/N values, infoframes, timings, pfit/scaler state, DPLL state and
 * color management, followed by the state of every plane on this pipe.
 * @context labels why the dump was made; @state may be NULL to skip planes.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
                                   struct intel_atomic_state *state,
                                   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled crtc has no meaningful config; still dump its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second link config used for DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* Dump each infoframe/SDP type that is enabled in the state. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): GAMUT_METADATA also dumps infoframes.drm —
	 * presumably the GMP packet carries the same HDR metadata; confirm
	 * this isn't a copy/paste slip.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	/* Pipe scalers only exist on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have a different panel fitter than PCH platforms. */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV uses a CGM block instead of the CSC unit of other platforms. */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
9499
/*
 * Verify that no digital port is claimed by more than one encoder in the
 * resulting state, and that SST/HDMI and MST are not mixed on the same
 * port. Returns true if the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/*
		 * Prefer the new state from this commit; fall back to the
		 * connector's current state if it isn't part of the update.
		 */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
9568
9569 static void
9570 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
9571                                            struct intel_crtc_state *crtc_state)
9572 {
9573         const struct intel_crtc_state *from_crtc_state = crtc_state;
9574
9575         if (crtc_state->bigjoiner_slave) {
9576                 from_crtc_state = intel_atomic_get_new_crtc_state(state,
9577                                                                   crtc_state->bigjoiner_linked_crtc);
9578
9579                 /* No need to copy state if the master state is unchanged */
9580                 if (!from_crtc_state)
9581                         return;
9582         }
9583
9584         intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
9585 }
9586
/*
 * Seed the hw crtc state from the uapi state for a full modeset, including
 * the nomodeset (color blob) parts.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
                                 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
9599
/*
 * Propagate the hw crtc state back into the uapi state so userspace sees
 * the actual configuration. Bigjoiner slaves are invisible to userspace,
 * so their uapi state is left untouched.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Can only fail on allocation, which shouldn't happen for a copy. */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
9621
/*
 * Make a bigjoiner slave's crtc state a copy of its master's state, while
 * preserving the slave's own uapi, scaler, dpll and crc state, then fix up
 * the fields that must differ between master and slave.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
                          const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* Preserve the slave's own per-crtc bits in the duplicated state. */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* Free old hw blob refs before they get overwritten by the memcpy. */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	/* The slave drives its own pipe's transcoder, not the master's. */
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}
9662
/*
 * Reset a crtc state to freshly-allocated defaults while preserving the
 * fields that must survive across a modeset (uapi state, scalers, dplls,
 * crc, and watermarks on platforms with pre-computed wm state), then
 * re-seed the hw state from uapi. Returns 0 on success, -ENOMEM on
 * allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
                                 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* These platforms keep their wm state across the modeset. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
9701
9702 static int
9703 intel_modeset_pipe_config(struct intel_atomic_state *state,
9704                           struct intel_crtc_state *pipe_config)
9705 {
9706         struct drm_crtc *crtc = pipe_config->uapi.crtc;
9707         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
9708         struct drm_connector *connector;
9709         struct drm_connector_state *connector_state;
9710         int base_bpp, ret, i;
9711         bool retry = true;
9712
9713         pipe_config->cpu_transcoder =
9714                 (enum transcoder) to_intel_crtc(crtc)->pipe;
9715
9716         /*
9717          * Sanitize sync polarity flags based on requested ones. If neither
9718          * positive or negative polarity is requested, treat this as meaning
9719          * negative polarity.
9720          */
9721         if (!(pipe_config->hw.adjusted_mode.flags &
9722               (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9723                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9724
9725         if (!(pipe_config->hw.adjusted_mode.flags &
9726               (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9727                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9728
9729         ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9730                                         pipe_config);
9731         if (ret)
9732                 return ret;
9733
9734         base_bpp = pipe_config->pipe_bpp;
9735
9736         /*
9737          * Determine the real pipe dimensions. Note that stereo modes can
9738          * increase the actual pipe size due to the frame doubling and
9739          * insertion of additional space for blanks between the frame. This
9740          * is stored in the crtc timings. We use the requested mode to do this
9741          * computation to clearly distinguish it from the adjusted mode, which
9742          * can be changed by the connectors in the below retry loop.
9743          */
9744         drm_mode_get_hv_timing(&pipe_config->hw.mode,
9745                                &pipe_config->pipe_src_w,
9746                                &pipe_config->pipe_src_h);
9747
9748         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
9749                 struct intel_encoder *encoder =
9750                         to_intel_encoder(connector_state->best_encoder);
9751
9752                 if (connector_state->crtc != crtc)
9753                         continue;
9754
9755                 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
9756                         drm_dbg_kms(&i915->drm,
9757                                     "rejecting invalid cloning configuration\n");
9758                         return -EINVAL;
9759                 }
9760
9761                 /*
9762                  * Determine output_types before calling the .compute_config()
9763                  * hooks so that the hooks can use this information safely.
9764                  */
9765                 if (encoder->compute_output_type)
9766                         pipe_config->output_types |=
9767                                 BIT(encoder->compute_output_type(encoder, pipe_config,
9768                                                                  connector_state));
9769                 else
9770                         pipe_config->output_types |= BIT(encoder->type);
9771         }
9772
9773 encoder_retry:
9774         /* Ensure the port clock defaults are reset when retrying. */
9775         pipe_config->port_clock = 0;
9776         pipe_config->pixel_multiplier = 1;
9777
9778         /* Fill in default crtc timings, allow encoders to overwrite them. */
9779         drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
9780                               CRTC_STEREO_DOUBLE);
9781
9782         /* Pass our mode to the connectors and the CRTC to give them a chance to
9783          * adjust it according to limitations or connector properties, and also
9784          * a chance to reject the mode entirely.
9785          */
9786         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
9787                 struct intel_encoder *encoder =
9788                         to_intel_encoder(connector_state->best_encoder);
9789
9790                 if (connector_state->crtc != crtc)
9791                         continue;
9792
9793                 ret = encoder->compute_config(encoder, pipe_config,
9794                                               connector_state);
9795                 if (ret < 0) {
9796                         if (ret != -EDEADLK)
9797                                 drm_dbg_kms(&i915->drm,
9798                                             "Encoder config failure: %d\n",
9799                                             ret);
9800                         return ret;
9801                 }
9802         }
9803
9804         /* Set default port clock if not overwritten by the encoder. Needs to be
9805          * done afterwards in case the encoder adjusts the mode. */
9806         if (!pipe_config->port_clock)
9807                 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
9808                         * pipe_config->pixel_multiplier;
9809
9810         ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9811         if (ret == -EDEADLK)
9812                 return ret;
9813         if (ret < 0) {
9814                 drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
9815                 return ret;
9816         }
9817
9818         if (ret == I915_DISPLAY_CONFIG_RETRY) {
9819                 if (drm_WARN(&i915->drm, !retry,
9820                              "loop in pipe configuration computation\n"))
9821                         return -EINVAL;
9822
9823                 drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
9824                 retry = false;
9825                 goto encoder_retry;
9826         }
9827
9828         /* Dithering seems to not pass-through bits correctly when it should, so
9829          * only enable it on 6bpc panels and when its not a compliance
9830          * test requesting 6bpc video pattern.
9831          */
9832         pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
9833                 !pipe_config->dither_force_disable;
9834         drm_dbg_kms(&i915->drm,
9835                     "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
9836                     base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9837
9838         return 0;
9839 }
9840
9841 static int
9842 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
9843 {
9844         struct intel_atomic_state *state =
9845                 to_intel_atomic_state(crtc_state->uapi.state);
9846         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9847         struct drm_connector_state *conn_state;
9848         struct drm_connector *connector;
9849         int i;
9850
9851         for_each_new_connector_in_state(&state->base, connector,
9852                                         conn_state, i) {
9853                 struct intel_encoder *encoder =
9854                         to_intel_encoder(conn_state->best_encoder);
9855                 int ret;
9856
9857                 if (conn_state->crtc != &crtc->base ||
9858                     !encoder->compute_config_late)
9859                         continue;
9860
9861                 ret = encoder->compute_config_late(encoder, crtc_state,
9862                                                    conn_state);
9863                 if (ret)
9864                         return ret;
9865         }
9866
9867         return 0;
9868 }
9869
/*
 * Compare two clock values, treating them as matching when they are
 * within roughly 5% of their combined value.  A zero clock only matches
 * another zero clock, never fuzzily.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int delta, sum;

        if (clock1 == clock2)
                return true;

        if (clock1 == 0 || clock2 == 0)
                return false;

        delta = abs(clock1 - clock2);
        sum = clock1 + clock2;

        /* (delta + sum) * 100 / sum < 105  <=>  delta < 5% of sum */
        return (delta + sum) * 100 / sum < 105;
}
9887
9888 static bool
9889 intel_compare_m_n(unsigned int m, unsigned int n,
9890                   unsigned int m2, unsigned int n2,
9891                   bool exact)
9892 {
9893         if (m == m2 && n == n2)
9894                 return true;
9895
9896         if (exact || !m || !n || !m2 || !n2)
9897                 return false;
9898
9899         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
9900
9901         if (n > n2) {
9902                 while (n > n2) {
9903                         m2 <<= 1;
9904                         n2 <<= 1;
9905                 }
9906         } else if (n < n2) {
9907                 while (n < n2) {
9908                         m <<= 1;
9909                         n <<= 1;
9910                 }
9911         }
9912
9913         if (n != n2)
9914                 return false;
9915
9916         return intel_fuzzy_clock_check(m, m2);
9917 }
9918
9919 static bool
9920 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
9921                        const struct intel_link_m_n *m2_n2,
9922                        bool exact)
9923 {
9924         return m_n->tu == m2_n2->tu &&
9925                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
9926                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
9927                 intel_compare_m_n(m_n->link_m, m_n->link_n,
9928                                   m2_n2->link_m, m2_n2->link_n, exact);
9929 }
9930
9931 static bool
9932 intel_compare_infoframe(const union hdmi_infoframe *a,
9933                         const union hdmi_infoframe *b)
9934 {
9935         return memcmp(a, b, sizeof(*a)) == 0;
9936 }
9937
9938 static bool
9939 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
9940                          const struct drm_dp_vsc_sdp *b)
9941 {
9942         return memcmp(a, b, sizeof(*a)) == 0;
9943 }
9944
9945 static void
9946 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
9947                                bool fastset, const char *name,
9948                                const union hdmi_infoframe *a,
9949                                const union hdmi_infoframe *b)
9950 {
9951         if (fastset) {
9952                 if (!drm_debug_enabled(DRM_UT_KMS))
9953                         return;
9954
9955                 drm_dbg_kms(&dev_priv->drm,
9956                             "fastset mismatch in %s infoframe\n", name);
9957                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
9958                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
9959                 drm_dbg_kms(&dev_priv->drm, "found:\n");
9960                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
9961         } else {
9962                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
9963                 drm_err(&dev_priv->drm, "expected:\n");
9964                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
9965                 drm_err(&dev_priv->drm, "found:\n");
9966                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
9967         }
9968 }
9969
9970 static void
9971 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
9972                                 bool fastset, const char *name,
9973                                 const struct drm_dp_vsc_sdp *a,
9974                                 const struct drm_dp_vsc_sdp *b)
9975 {
9976         if (fastset) {
9977                 if (!drm_debug_enabled(DRM_UT_KMS))
9978                         return;
9979
9980                 drm_dbg_kms(&dev_priv->drm,
9981                             "fastset mismatch in %s dp sdp\n", name);
9982                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
9983                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
9984                 drm_dbg_kms(&dev_priv->drm, "found:\n");
9985                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
9986         } else {
9987                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
9988                 drm_err(&dev_priv->drm, "expected:\n");
9989                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
9990                 drm_err(&dev_priv->drm, "found:\n");
9991                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
9992         }
9993 }
9994
9995 static void __printf(4, 5)
9996 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
9997                      const char *name, const char *format, ...)
9998 {
9999         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
10000         struct va_format vaf;
10001         va_list args;
10002
10003         va_start(args, format);
10004         vaf.fmt = format;
10005         vaf.va = &args;
10006
10007         if (fastset)
10008                 drm_dbg_kms(&i915->drm,
10009                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
10010                             crtc->base.base.id, crtc->base.name, name, &vaf);
10011         else
10012                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
10013                         crtc->base.base.id, crtc->base.name, name, &vaf);
10014
10015         va_end(args);
10016 }
10017
10018 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
10019 {
10020         if (dev_priv->params.fastboot != -1)
10021                 return dev_priv->params.fastboot;
10022
10023         /* Enable fastboot by default on Skylake and newer */
10024         if (INTEL_GEN(dev_priv) >= 9)
10025                 return true;
10026
10027         /* Enable fastboot by default on VLV and CHV */
10028         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
10029                 return true;
10030
10031         /* Disabled by default on all others */
10032         return false;
10033 }
10034
/*
 * Compare a software-computed crtc state (@current_config) against a
 * state read back from the hardware (@pipe_config).  Every field that
 * disagrees is reported via the pipe_config_mismatch() helpers.  With
 * @fastset set, comparisons are relaxed (fuzzy clocks/m_n) and
 * mismatches are only logged at debug level.
 *
 * Returns true when the two states are considered equivalent.  Note
 * that the comparison deliberately keeps going after the first
 * mismatch so that all differences get logged in one pass.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
                          const struct intel_crtc_state *pipe_config,
                          bool fastset)
{
        struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        bool ret = true;
        u32 bp_gamma = 0;
        /* True when fastsetting over state inherited from the BIOS. */
        bool fixup_inherited = fastset &&
                current_config->inherited && !pipe_config->inherited;

        if (fixup_inherited && !fastboot_enabled(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm,
                            "initial modeset and fastboot not set\n");
                ret = false;
        }

/*
 * The PIPE_CONF_CHECK_* macro family below compares one field of the
 * two states and, on mismatch, logs it and clears @ret.  They rely on
 * the local variables declared above (crtc, fastset, ret, ...).
 */
#define PIPE_CONF_CHECK_X(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc,  __stringify(name), \
                                     "(expected %s, found %s)", \
                                     yesno(current_config->name), \
                                     yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
        if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
                PIPE_CONF_CHECK_BOOL(name); \
        } else { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
                                     yesno(current_config->name), \
                                     yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %p, found %p)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Link m/n comparison is exact for full modesets, fuzzy for fastsets. */
#define PIPE_CONF_CHECK_M_N(name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name,\
                                    !fastset)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected tu %i gmch %i/%i link %i/%i, " \
                                     "found tu %i, gmch %i/%i link %i/%i)", \
                                     current_config->name.tu, \
                                     current_config->name.gmch_m, \
                                     current_config->name.gmch_n, \
                                     current_config->name.link_m, \
                                     current_config->name.link_n, \
                                     pipe_config->name.tu, \
                                     pipe_config->name.gmch_m, \
                                     pipe_config->name.gmch_n, \
                                     pipe_config->name.link_m, \
                                     pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name, !fastset) && \
            !intel_compare_link_m_n(&current_config->alt_name, \
                                    &pipe_config->name, !fastset)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected tu %i gmch %i/%i link %i/%i, " \
                                     "or tu %i gmch %i/%i link %i/%i, " \
                                     "found tu %i, gmch %i/%i link %i/%i)", \
                                     current_config->name.tu, \
                                     current_config->name.gmch_m, \
                                     current_config->name.gmch_n, \
                                     current_config->name.link_m, \
                                     current_config->name.link_n, \
                                     current_config->alt_name.tu, \
                                     current_config->alt_name.gmch_m, \
                                     current_config->alt_name.gmch_n, \
                                     current_config->alt_name.link_m, \
                                     current_config->alt_name.link_n, \
                                     pipe_config->name.tu, \
                                     pipe_config->name.gmch_m, \
                                     pipe_config->name.gmch_n, \
                                     pipe_config->name.link_m, \
                                     pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

/* Compare only the bits selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(%x) (expected %i, found %i)", \
                                     (mask), \
                                     current_config->name & (mask), \
                                     pipe_config->name & (mask)); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
        if (!intel_compare_infoframe(&current_config->infoframes.name, \
                                     &pipe_config->infoframes.name)) { \
                pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
                                               &current_config->infoframes.name, \
                                               &pipe_config->infoframes.name); \
                ret = false; \
        } \
} while (0)

/* The VSC SDP is only compared when neither state has PSR enabled. */
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
        if (!current_config->has_psr && !pipe_config->has_psr && \
            !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
                                      &pipe_config->infoframes.name)) { \
                pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
                                                &current_config->infoframes.name, \
                                                &pipe_config->infoframes.name); \
                ret = false; \
        } \
} while (0)

/* Compare a LUT mode field and, when it matches, the LUT contents too. */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
        if (current_config->name1 != pipe_config->name1) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name1), \
                                "(expected %i, found %i, won't compare lut values)", \
                                current_config->name1, \
                                pipe_config->name1); \
                ret = false;\
        } else { \
                if (!intel_color_lut_equal(current_config->name2, \
                                        pipe_config->name2, pipe_config->name1, \
                                        bit_precision)) { \
                        pipe_config_mismatch(fastset, crtc, __stringify(name2), \
                                        "hw_state doesn't match sw_state"); \
                        ret = false; \
                } \
        } \
} while (0)

/* True if either state carries the given readout quirk. */
#define PIPE_CONF_QUIRK(quirk) \
        ((current_config->quirks | pipe_config->quirks) & (quirk))

        PIPE_CONF_CHECK_I(cpu_transcoder);

        PIPE_CONF_CHECK_BOOL(has_pch_encoder);
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_M_N(fdi_m_n);

        PIPE_CONF_CHECK_I(lane_count);
        PIPE_CONF_CHECK_X(lane_lat_optim_mask);

        if (INTEL_GEN(dev_priv) < 8) {
                PIPE_CONF_CHECK_M_N(dp_m_n);

                if (current_config->has_drrs)
                        PIPE_CONF_CHECK_M_N(dp_m2_n2);
        } else
                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

        PIPE_CONF_CHECK_X(output_types);

        /* FIXME do the readout properly and get rid of this quirk */
        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);

                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);

                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

                PIPE_CONF_CHECK_I(pixel_multiplier);

                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_INTERLACE);

                if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
                        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                              DRM_MODE_FLAG_PHSYNC);
                        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                              DRM_MODE_FLAG_NHSYNC);
                        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                              DRM_MODE_FLAG_PVSYNC);
                        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                              DRM_MODE_FLAG_NVSYNC);
                }
        }

        PIPE_CONF_CHECK_I(output_format);
        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
        if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                PIPE_CONF_CHECK_BOOL(limited_color_range);

        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
        PIPE_CONF_CHECK_BOOL(has_infoframe);
        /* FIXME do the readout properly and get rid of this quirk */
        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
                PIPE_CONF_CHECK_BOOL(fec_enable);

        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

        PIPE_CONF_CHECK_X(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
        if (INTEL_GEN(dev_priv) < 4)
                PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

        /*
         * Changing the EDP transcoder input mux
         * (A_ONOFF vs. A_ON) requires a full modeset.
         */
        PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

        /* These fields are only compared for full modesets, not fastsets. */
        if (!fastset) {
                PIPE_CONF_CHECK_I(pipe_src_w);
                PIPE_CONF_CHECK_I(pipe_src_h);

                PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
                if (current_config->pch_pfit.enabled) {
                        PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
                        PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
                        PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
                        PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
                }

                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
                /* FIXME do the readout properly and get rid of this quirk */
                if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
                        PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

                PIPE_CONF_CHECK_X(gamma_mode);
                if (IS_CHERRYVIEW(dev_priv))
                        PIPE_CONF_CHECK_X(cgm_mode);
                else
                        PIPE_CONF_CHECK_X(csc_mode);
                PIPE_CONF_CHECK_BOOL(gamma_enable);
                PIPE_CONF_CHECK_BOOL(csc_enable);

                PIPE_CONF_CHECK_I(linetime);
                PIPE_CONF_CHECK_I(ips_linetime);

                /* bp_gamma == 0 means the gamma LUT cannot be compared. */
                bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
                if (bp_gamma)
                        PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
        }

        PIPE_CONF_CHECK_BOOL(double_wide);

        PIPE_CONF_CHECK_P(shared_dpll);

        /* FIXME do the readout properly and get rid of this quirk */
        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
                PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
                PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
                PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
                PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
                PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
                PIPE_CONF_CHECK_X(dpll_hw_state.spll);
                PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
                PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
                PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
                PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

                PIPE_CONF_CHECK_X(dsi_pll.ctrl);
                PIPE_CONF_CHECK_X(dsi_pll.div);

                if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
                        PIPE_CONF_CHECK_I(pipe_bpp);

                PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
                PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
                PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

                PIPE_CONF_CHECK_I(min_voltage_level);
        }

        PIPE_CONF_CHECK_X(infoframes.enable);
        PIPE_CONF_CHECK_X(infoframes.gcp);
        PIPE_CONF_CHECK_INFOFRAME(avi);
        PIPE_CONF_CHECK_INFOFRAME(spd);
        PIPE_CONF_CHECK_INFOFRAME(hdmi);
        PIPE_CONF_CHECK_INFOFRAME(drm);
        PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

        PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
        PIPE_CONF_CHECK_I(master_transcoder);
        PIPE_CONF_CHECK_BOOL(bigjoiner);
        PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
        PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);

        PIPE_CONF_CHECK_I(dsc.compression_enable);
        PIPE_CONF_CHECK_I(dsc.dsc_split);
        PIPE_CONF_CHECK_I(dsc.compressed_bpp);

        PIPE_CONF_CHECK_I(mst_master_transcoder);

        PIPE_CONF_CHECK_BOOL(vrr.enable);
        PIPE_CONF_CHECK_I(vrr.vmin);
        PIPE_CONF_CHECK_I(vrr.vmax);
        PIPE_CONF_CHECK_I(vrr.flipline);
        PIPE_CONF_CHECK_I(vrr.pipeline_full);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

        return ret;
}
10442
10443 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
10444                                            const struct intel_crtc_state *pipe_config)
10445 {
10446         if (pipe_config->has_pch_encoder) {
10447                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10448                                                             &pipe_config->fdi_m_n);
10449                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
10450
10451                 /*
10452                  * FDI already provided one idea for the dotclock.
10453                  * Yell if the encoder disagrees.
10454                  */
10455                 drm_WARN(&dev_priv->drm,
10456                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
10457                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10458                          fdi_dotclock, dotclock);
10459         }
10460 }
10461
/*
 * Cross-check the SKL+ watermark and DDB allocation software state for
 * @crtc against what is actually programmed in the hardware, logging any
 * mismatch via drm_err(). Only runs on gen9+ and only for active crtcs.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* hw readout snapshot; heap-allocated to keep the stack frame small */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	u8 hw_enabled_slices;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	/* Read back the programmed watermarks and DDB allocations. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* DBUF slice tracking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			/*
			 * Level 0 may legitimately have been programmed with
			 * the SAGV wm0 instead of the regular wm0.
			 */
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1, level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1,
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe), plane + 1,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 *
	 * NOTE(review): despite the comment above, the 'if (1)' below means
	 * the cursor is always checked in this version; the conditional skip
	 * appears to have been removed/disabled — confirm against history.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			/* Same wm0-vs-sagv_wm0 allowance as for the planes. */
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]) ||
			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
							       &sw_plane_wm->sagv_wm0)))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe),
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe),
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
10609
10610 static void
10611 verify_connector_state(struct intel_atomic_state *state,
10612                        struct intel_crtc *crtc)
10613 {
10614         struct drm_connector *connector;
10615         struct drm_connector_state *new_conn_state;
10616         int i;
10617
10618         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
10619                 struct drm_encoder *encoder = connector->encoder;
10620                 struct intel_crtc_state *crtc_state = NULL;
10621
10622                 if (new_conn_state->crtc != &crtc->base)
10623                         continue;
10624
10625                 if (crtc)
10626                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
10627
10628                 intel_connector_verify_state(crtc_state, new_conn_state);
10629
10630                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
10631                      "connector's atomic encoder doesn't match legacy encoder\n");
10632         }
10633 }
10634
10635 static void
10636 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
10637 {
10638         struct intel_encoder *encoder;
10639         struct drm_connector *connector;
10640         struct drm_connector_state *old_conn_state, *new_conn_state;
10641         int i;
10642
10643         for_each_intel_encoder(&dev_priv->drm, encoder) {
10644                 bool enabled = false, found = false;
10645                 enum pipe pipe;
10646
10647                 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
10648                             encoder->base.base.id,
10649                             encoder->base.name);
10650
10651                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
10652                                                    new_conn_state, i) {
10653                         if (old_conn_state->best_encoder == &encoder->base)
10654                                 found = true;
10655
10656                         if (new_conn_state->best_encoder != &encoder->base)
10657                                 continue;
10658                         found = enabled = true;
10659
10660                         I915_STATE_WARN(new_conn_state->crtc !=
10661                                         encoder->base.crtc,
10662                              "connector's crtc doesn't match encoder crtc\n");
10663                 }
10664
10665                 if (!found)
10666                         continue;
10667
10668                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
10669                      "encoder's enabled state mismatch "
10670                      "(expected %i, found %i)\n",
10671                      !!encoder->base.crtc, enabled);
10672
10673                 if (!encoder->base.crtc) {
10674                         bool active;
10675
10676                         active = encoder->get_hw_state(encoder, &pipe);
10677                         I915_STATE_WARN(active,
10678                              "encoder detached but still enabled on pipe %c.\n",
10679                              pipe_name(pipe));
10680                 }
10681         }
10682 }
10683
/*
 * Verify the committed sw state for @crtc against a fresh hw readout.
 * @old_crtc_state has already been swapped out of the atomic state; it
 * is reset here and reused (via the pipe_config alias) as scratch space
 * for the hardware readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	/* alias: old_crtc_state doubles as the hw readout buffer below */
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/* Wipe the stale sw state but preserve the uapi.state backpointer. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* With bigjoiner, the encoders hang off the master crtc. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* Inactive crtc: nothing further to compare. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Exact (non-fuzzy) compare of committed sw state vs. hw readout. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
10755
10756 static void
10757 intel_verify_planes(struct intel_atomic_state *state)
10758 {
10759         struct intel_plane *plane;
10760         const struct intel_plane_state *plane_state;
10761         int i;
10762
10763         for_each_new_intel_plane_in_state(state, plane,
10764                                           plane_state, i)
10765                 assert_plane(plane, plane_state->planar_slave ||
10766                              plane_state->uapi.visible);
10767 }
10768
/*
 * Verify the sw tracking of one shared DPLL against its hw state.
 * With @crtc == NULL (and @new_crtc_state NULL) only the global pll
 * bookkeeping is checked; otherwise the pll's active/reference masks
 * are checked against @crtc as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls may be on without sw users; skip the on/off checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* active_mask must always be a subset of the reference mask. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* An active crtc must be in the pll's active mask, and vice versa. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The committed sw hw_state must match what the hardware reports. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
10823
10824 static void
10825 verify_shared_dpll_state(struct intel_crtc *crtc,
10826                          struct intel_crtc_state *old_crtc_state,
10827                          struct intel_crtc_state *new_crtc_state)
10828 {
10829         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10830
10831         if (new_crtc_state->shared_dpll)
10832                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
10833
10834         if (old_crtc_state->shared_dpll &&
10835             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
10836                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
10837                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
10838
10839                 I915_STATE_WARN(pll->active_mask & crtc_mask,
10840                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
10841                                 pipe_name(crtc->pipe));
10842                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
10843                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
10844                                 pipe_name(crtc->pipe));
10845         }
10846 }
10847
10848 static void
10849 intel_modeset_verify_crtc(struct intel_crtc *crtc,
10850                           struct intel_atomic_state *state,
10851                           struct intel_crtc_state *old_crtc_state,
10852                           struct intel_crtc_state *new_crtc_state)
10853 {
10854         if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
10855                 return;
10856
10857         verify_wm_state(crtc, new_crtc_state);
10858         verify_connector_state(state, crtc);
10859         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
10860         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
10861 }
10862
10863 static void
10864 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
10865 {
10866         int i;
10867
10868         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
10869                 verify_single_dpll_state(dev_priv,
10870                                          &dev_priv->dpll.shared_dplls[i],
10871                                          NULL, NULL);
10872 }
10873
10874 static void
10875 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
10876                               struct intel_atomic_state *state)
10877 {
10878         verify_encoder_state(dev_priv, state);
10879         verify_connector_state(state, NULL);
10880         verify_disabled_dpll_state(dev_priv);
10881 }
10882
10883 static void
10884 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
10885 {
10886         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10887         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10888         struct drm_display_mode adjusted_mode =
10889                 crtc_state->hw.adjusted_mode;
10890
10891         if (crtc_state->vrr.enable) {
10892                 adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
10893                 adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
10894                 adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
10895                 crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
10896         }
10897
10898         drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
10899
10900         crtc->mode_flags = crtc_state->mode_flags;
10901
10902         /*
10903          * The scanline counter increments at the leading edge of hsync.
10904          *
10905          * On most platforms it starts counting from vtotal-1 on the
10906          * first active line. That means the scanline counter value is
10907          * always one less than what we would expect. Ie. just after
10908          * start of vblank, which also occurs at start of hsync (on the
10909          * last active line), the scanline counter will read vblank_start-1.
10910          *
10911          * On gen2 the scanline counter starts counting from 1 instead
10912          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10913          * to keep the value positive), instead of adding one.
10914          *
10915          * On HSW+ the behaviour of the scanline counter depends on the output
10916          * type. For DP ports it behaves like most other platforms, but on HDMI
10917          * there's an extra 1 line difference. So we need to add two instead of
10918          * one to the value.
10919          *
10920          * On VLV/CHV DSI the scanline counter would appear to increment
10921          * approx. 1/3 of a scanline before start of vblank. Unfortunately
10922          * that means we can't tell whether we're in vblank or not while
10923          * we're on that particular line. We must still set scanline_offset
10924          * to 1 so that the vblank timestamps come out correct when we query
10925          * the scanline counter from within the vblank interrupt handler.
10926          * However if queried just before the start of vblank we'll get an
10927          * answer that's slightly in the future.
10928          */
10929         if (IS_GEN(dev_priv, 2)) {
10930                 int vtotal;
10931
10932                 vtotal = adjusted_mode.crtc_vtotal;
10933                 if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10934                         vtotal /= 2;
10935
10936                 crtc->scanline_offset = vtotal - 1;
10937         } else if (HAS_DDI(dev_priv) &&
10938                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
10939                 crtc->scanline_offset = 2;
10940         } else {
10941                 crtc->scanline_offset = 1;
10942         }
10943 }
10944
10945 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
10946 {
10947         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10948         struct intel_crtc_state *new_crtc_state;
10949         struct intel_crtc *crtc;
10950         int i;
10951
10952         if (!dev_priv->display.crtc_compute_clock)
10953                 return;
10954
10955         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10956                 if (!intel_crtc_needs_modeset(new_crtc_state))
10957                         continue;
10958
10959                 intel_release_shared_dplls(state, crtc);
10960         }
10961 }
10962
10963 /*
10964  * This implements the workaround described in the "notes" section of the mode
10965  * set sequence documentation. When going from no pipes or single pipe to
10966  * multiple pipes, and planes are enabled after the pipe, we need to wait at
10967  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
10968  */
10969 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
10970 {
10971         struct intel_crtc_state *crtc_state;
10972         struct intel_crtc *crtc;
10973         struct intel_crtc_state *first_crtc_state = NULL;
10974         struct intel_crtc_state *other_crtc_state = NULL;
10975         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
10976         int i;
10977
10978         /* look at all crtc's that are going to be enabled in during modeset */
10979         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10980                 if (!crtc_state->hw.active ||
10981                     !intel_crtc_needs_modeset(crtc_state))
10982                         continue;
10983
10984                 if (first_crtc_state) {
10985                         other_crtc_state = crtc_state;
10986                         break;
10987                 } else {
10988                         first_crtc_state = crtc_state;
10989                         first_pipe = crtc->pipe;
10990                 }
10991         }
10992
10993         /* No workaround needed? */
10994         if (!first_crtc_state)
10995                 return 0;
10996
10997         /* w/a possibly needed, check how many crtc's are already enabled. */
10998         for_each_intel_crtc(state->base.dev, crtc) {
10999                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
11000                 if (IS_ERR(crtc_state))
11001                         return PTR_ERR(crtc_state);
11002
11003                 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
11004
11005                 if (!crtc_state->hw.active ||
11006                     intel_crtc_needs_modeset(crtc_state))
11007                         continue;
11008
11009                 /* 2 or more enabled crtcs means no need for w/a */
11010                 if (enabled_pipe != INVALID_PIPE)
11011                         return 0;
11012
11013                 enabled_pipe = crtc->pipe;
11014         }
11015
11016         if (enabled_pipe != INVALID_PIPE)
11017                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
11018         else if (other_crtc_state)
11019                 other_crtc_state->hsw_workaround_pipe = first_pipe;
11020
11021         return 0;
11022 }
11023
11024 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
11025                            u8 active_pipes)
11026 {
11027         const struct intel_crtc_state *crtc_state;
11028         struct intel_crtc *crtc;
11029         int i;
11030
11031         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11032                 if (crtc_state->hw.active)
11033                         active_pipes |= BIT(crtc->pipe);
11034                 else
11035                         active_pipes &= ~BIT(crtc->pipe);
11036         }
11037
11038         return active_pipes;
11039 }
11040
11041 static int intel_modeset_checks(struct intel_atomic_state *state)
11042 {
11043         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11044
11045         state->modeset = true;
11046
11047         if (IS_HASWELL(dev_priv))
11048                 return hsw_mode_set_planes_workaround(state);
11049
11050         return 0;
11051 }
11052
11053 /*
11054  * Handle calculation of various watermark data at the end of the atomic check
11055  * phase.  The code here should be run after the per-crtc and per-plane 'check'
11056  * handlers to ensure that all derived state has been updated.
11057  */
11058 static int calc_watermark_data(struct intel_atomic_state *state)
11059 {
11060         struct drm_device *dev = state->base.dev;
11061         struct drm_i915_private *dev_priv = to_i915(dev);
11062
11063         /* Is there platform-specific watermark information to calculate? */
11064         if (dev_priv->display.compute_global_watermarks)
11065                 return dev_priv->display.compute_global_watermarks(state);
11066
11067         return 0;
11068 }
11069
11070 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
11071                                      struct intel_crtc_state *new_crtc_state)
11072 {
11073         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
11074                 return;
11075
11076         new_crtc_state->uapi.mode_changed = false;
11077         new_crtc_state->update_pipe = true;
11078 }
11079
11080 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
11081                                     struct intel_crtc_state *new_crtc_state)
11082 {
11083         /*
11084          * If we're not doing the full modeset we want to
11085          * keep the current M/N values as they may be
11086          * sufficiently different to the computed values
11087          * to cause problems.
11088          *
11089          * FIXME: should really copy more fuzzy state here
11090          */
11091         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
11092         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
11093         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
11094         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
11095 }
11096
11097 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
11098                                           struct intel_crtc *crtc,
11099                                           u8 plane_ids_mask)
11100 {
11101         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11102         struct intel_plane *plane;
11103
11104         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
11105                 struct intel_plane_state *plane_state;
11106
11107                 if ((plane_ids_mask & BIT(plane->id)) == 0)
11108                         continue;
11109
11110                 plane_state = intel_atomic_get_plane_state(state, plane);
11111                 if (IS_ERR(plane_state))
11112                         return PTR_ERR(plane_state);
11113         }
11114
11115         return 0;
11116 }
11117
11118 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
11119                                      struct intel_crtc *crtc)
11120 {
11121         const struct intel_crtc_state *old_crtc_state =
11122                 intel_atomic_get_old_crtc_state(state, crtc);
11123         const struct intel_crtc_state *new_crtc_state =
11124                 intel_atomic_get_new_crtc_state(state, crtc);
11125
11126         return intel_crtc_add_planes_to_state(state, crtc,
11127                                               old_crtc_state->enabled_planes |
11128                                               new_crtc_state->enabled_planes);
11129 }
11130
11131 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
11132 {
11133         /* See {hsw,vlv,ivb}_plane_ratio() */
11134         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
11135                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11136                 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
11137 }
11138
11139 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
11140                                            struct intel_crtc *crtc,
11141                                            struct intel_crtc *other)
11142 {
11143         const struct intel_plane_state *plane_state;
11144         struct intel_plane *plane;
11145         u8 plane_ids = 0;
11146         int i;
11147
11148         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11149                 if (plane->pipe == crtc->pipe)
11150                         plane_ids |= BIT(plane->id);
11151         }
11152
11153         return intel_crtc_add_planes_to_state(state, other, plane_ids);
11154 }
11155
11156 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
11157 {
11158         const struct intel_crtc_state *crtc_state;
11159         struct intel_crtc *crtc;
11160         int i;
11161
11162         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11163                 int ret;
11164
11165                 if (!crtc_state->bigjoiner)
11166                         continue;
11167
11168                 ret = intel_crtc_add_bigjoiner_planes(state, crtc,
11169                                                       crtc_state->bigjoiner_linked_crtc);
11170                 if (ret)
11171                         return ret;
11172         }
11173
11174         return 0;
11175 }
11176
/*
 * Run the per-plane atomic checks and pull into the state any additional
 * planes that the minimum cdclk computation will need to look at.
 * Returns 0 on success or a negative errno.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	/* icl+ Y/UV plane linking must be resolved before the plane checks. */
	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	/* Likewise the bigjoiner slave planes must be part of the state. */
	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane is excluded from the comparison here. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
11239
/*
 * Determine whether the cdclk must be fully recomputed for this state:
 * a plane's minimum cdclk requirement, the forced minimum cdclk, or the
 * bandwidth-derived minimum cdclk may have changed. The verdict is
 * reported through @need_cdclk_calc; returns 0 or a negative errno.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* A change of the forced minimum always requires a recomputation. */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Nothing further to compare without both cdclk and bw state. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
11292
11293 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
11294 {
11295         struct intel_crtc_state *crtc_state;
11296         struct intel_crtc *crtc;
11297         int i;
11298
11299         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11300                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
11301                 int ret;
11302
11303                 ret = intel_crtc_atomic_check(state, crtc);
11304                 if (ret) {
11305                         drm_dbg_atomic(&i915->drm,
11306                                        "[CRTC:%d:%s] atomic driver check failed\n",
11307                                        crtc->base.base.id, crtc->base.name);
11308                         return ret;
11309                 }
11310         }
11311
11312         return 0;
11313 }
11314
11315 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
11316                                                u8 transcoders)
11317 {
11318         const struct intel_crtc_state *new_crtc_state;
11319         struct intel_crtc *crtc;
11320         int i;
11321
11322         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11323                 if (new_crtc_state->hw.enable &&
11324                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
11325                     intel_crtc_needs_modeset(new_crtc_state))
11326                         return true;
11327         }
11328
11329         return false;
11330 }
11331
11332 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
11333                                         struct intel_crtc *crtc,
11334                                         struct intel_crtc_state *old_crtc_state,
11335                                         struct intel_crtc_state *new_crtc_state)
11336 {
11337         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
11338         struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
11339         struct intel_crtc *slave, *master;
11340
11341         /* slave being enabled, is master is still claiming this crtc? */
11342         if (old_crtc_state->bigjoiner_slave) {
11343                 slave = crtc;
11344                 master = old_crtc_state->bigjoiner_linked_crtc;
11345                 master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
11346                 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
11347                         goto claimed;
11348         }
11349
11350         if (!new_crtc_state->bigjoiner)
11351                 return 0;
11352
11353         if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
11354                 DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
11355                               "CRTC + 1 to be used, doesn't exist\n",
11356                               crtc->base.base.id, crtc->base.name);
11357                 return -EINVAL;
11358         }
11359
11360         slave = new_crtc_state->bigjoiner_linked_crtc =
11361                 intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
11362         slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
11363         master = crtc;
11364         if (IS_ERR(slave_crtc_state))
11365                 return PTR_ERR(slave_crtc_state);
11366
11367         /* master being enabled, slave was already configured? */
11368         if (slave_crtc_state->uapi.enable)
11369                 goto claimed;
11370
11371         DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
11372                       slave->base.base.id, slave->base.name);
11373
11374         return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
11375
11376 claimed:
11377         DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
11378                       "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
11379                       slave->base.base.id, slave->base.name,
11380                       master->base.base.id, master->base.name);
11381         return -EINVAL;
11382 }
11383
11384 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
11385                                  struct intel_crtc_state *master_crtc_state)
11386 {
11387         struct intel_crtc_state *slave_crtc_state =
11388                 intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
11389
11390         slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
11391         slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
11392         slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
11393         intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
11394 }
11395
11396 /**
11397  * DOC: asynchronous flip implementation
11398  *
11399  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
11400  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
11401  * Correspondingly, support is currently added for primary plane only.
11402  *
11403  * Async flip can only change the plane surface address, so anything else
11404  * changing is rejected from the intel_atomic_check_async() function.
11405  * Once this check is cleared, flip done interrupt is enabled using
11406  * the intel_crtc_enable_flip_done() function.
11407  *
11408  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
11410  * handler itself. The timestamp and sequence sent during the flip done event
11411  * correspond to the last vblank and have no relation to the actual time when
11412  * the flip done event was sent.
11413  */
/*
 * Reject any state that changes more than an async flip may change (only
 * the plane surface address is allowed to differ). See the
 * "asynchronous flip implementation" DOC comment above for the overall
 * flow. Returns 0 if the state is async-flip compatible, -EINVAL otherwise.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	/* crtc-level restrictions: no modeset, crtc active, planes unchanged */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	/* plane-level restrictions: only the surface address may change */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 and gen10 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->color_plane[0].stride !=
		    new_plane_state->color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
11528
/*
 * Pull the linked crtc of every bigjoiner pair into the state, propagate
 * the modeset requirement to it, and tear down stale bigjoiner links on
 * masters that are getting a full modeset. Returns 0 or a negative errno.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* The linked crtc must follow its partner through a modeset. */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
11572
11573 /**
11574  * intel_atomic_check - validate state object
11575  * @dev: drm device
11576  * @_state: state to validate
11577  */
11578 static int intel_atomic_check(struct drm_device *dev,
11579                               struct drm_atomic_state *_state)
11580 {
11581         struct drm_i915_private *dev_priv = to_i915(dev);
11582         struct intel_atomic_state *state = to_intel_atomic_state(_state);
11583         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
11584         struct intel_crtc *crtc;
11585         int ret, i;
11586         bool any_ms = false;
11587
11588         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11589                                             new_crtc_state, i) {
11590                 if (new_crtc_state->inherited != old_crtc_state->inherited)
11591                         new_crtc_state->uapi.mode_changed = true;
11592         }
11593
11594         intel_vrr_check_modeset(state);
11595
11596         ret = drm_atomic_helper_check_modeset(dev, &state->base);
11597         if (ret)
11598                 goto fail;
11599
11600         ret = intel_bigjoiner_add_affected_crtcs(state);
11601         if (ret)
11602                 goto fail;
11603
11604         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11605                                             new_crtc_state, i) {
11606                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
11607                         /* Light copy */
11608                         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
11609
11610                         continue;
11611                 }
11612
11613                 if (!new_crtc_state->uapi.enable) {
11614                         if (!new_crtc_state->bigjoiner_slave) {
11615                                 intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
11616                                 any_ms = true;
11617                         }
11618                         continue;
11619                 }
11620
11621                 ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
11622                 if (ret)
11623                         goto fail;
11624
11625                 ret = intel_modeset_pipe_config(state, new_crtc_state);
11626                 if (ret)
11627                         goto fail;
11628
11629                 ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
11630                                                    new_crtc_state);
11631                 if (ret)
11632                         goto fail;
11633         }
11634
11635         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11636                                             new_crtc_state, i) {
11637                 if (!intel_crtc_needs_modeset(new_crtc_state))
11638                         continue;
11639
11640                 ret = intel_modeset_pipe_config_late(new_crtc_state);
11641                 if (ret)
11642                         goto fail;
11643
11644                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
11645         }
11646
11647         /**
11648          * Check if fastset is allowed by external dependencies like other
11649          * pipes and transcoders.
11650          *
11651          * Right now it only forces a fullmodeset when the MST master
11652          * transcoder did not changed but the pipe of the master transcoder
11653          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
11654          * in case of port synced crtcs, if one of the synced crtcs
11655          * needs a full modeset, all other synced crtcs should be
11656          * forced a full modeset.
11657          */
11658         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
11659                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
11660                         continue;
11661
11662                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
11663                         enum transcoder master = new_crtc_state->mst_master_transcoder;
11664
11665                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
11666                                 new_crtc_state->uapi.mode_changed = true;
11667                                 new_crtc_state->update_pipe = false;
11668                         }
11669                 }
11670
11671                 if (is_trans_port_sync_mode(new_crtc_state)) {
11672                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
11673
11674                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
11675                                 trans |= BIT(new_crtc_state->master_transcoder);
11676
11677                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
11678                                 new_crtc_state->uapi.mode_changed = true;
11679                                 new_crtc_state->update_pipe = false;
11680                         }
11681                 }
11682
11683                 if (new_crtc_state->bigjoiner) {
11684                         struct intel_crtc_state *linked_crtc_state =
11685                                 intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
11686
11687                         if (intel_crtc_needs_modeset(linked_crtc_state)) {
11688                                 new_crtc_state->uapi.mode_changed = true;
11689                                 new_crtc_state->update_pipe = false;
11690                         }
11691                 }
11692         }
11693
11694         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11695                                             new_crtc_state, i) {
11696                 if (intel_crtc_needs_modeset(new_crtc_state)) {
11697                         any_ms = true;
11698                         continue;
11699                 }
11700
11701                 if (!new_crtc_state->update_pipe)
11702                         continue;
11703
11704                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
11705         }
11706
11707         if (any_ms && !check_digital_port_conflicts(state)) {
11708                 drm_dbg_kms(&dev_priv->drm,
11709                             "rejecting conflicting digital port configuration\n");
11710                 ret = -EINVAL;
11711                 goto fail;
11712         }
11713
11714         ret = drm_dp_mst_atomic_check(&state->base);
11715         if (ret)
11716                 goto fail;
11717
11718         ret = intel_atomic_check_planes(state);
11719         if (ret)
11720                 goto fail;
11721
11722         intel_fbc_choose_crtc(dev_priv, state);
11723         ret = calc_watermark_data(state);
11724         if (ret)
11725                 goto fail;
11726
11727         ret = intel_bw_atomic_check(state);
11728         if (ret)
11729                 goto fail;
11730
11731         ret = intel_atomic_check_cdclk(state, &any_ms);
11732         if (ret)
11733                 goto fail;
11734
11735         if (any_ms) {
11736                 ret = intel_modeset_checks(state);
11737                 if (ret)
11738                         goto fail;
11739
11740                 ret = intel_modeset_calc_cdclk(state);
11741                 if (ret)
11742                         return ret;
11743
11744                 intel_modeset_clear_plls(state);
11745         }
11746
11747         ret = intel_atomic_check_crtcs(state);
11748         if (ret)
11749                 goto fail;
11750
11751         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11752                                             new_crtc_state, i) {
11753                 if (new_crtc_state->uapi.async_flip) {
11754                         ret = intel_atomic_check_async(state);
11755                         if (ret)
11756                                 goto fail;
11757                 }
11758
11759                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
11760                     !new_crtc_state->update_pipe)
11761                         continue;
11762
11763                 intel_dump_pipe_config(new_crtc_state, state,
11764                                        intel_crtc_needs_modeset(new_crtc_state) ?
11765                                        "[modeset]" : "[fastset]");
11766         }
11767
11768         return 0;
11769
11770  fail:
11771         if (ret == -EDEADLK)
11772                 return ret;
11773
11774         /*
11775          * FIXME would probably be nice to know which crtc specifically
11776          * caused the failure, in cases where we can pinpoint it.
11777          */
11778         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
11779                                             new_crtc_state, i)
11780                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
11781
11782         return ret;
11783 }
11784
11785 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
11786 {
11787         struct intel_crtc_state *crtc_state;
11788         struct intel_crtc *crtc;
11789         int i, ret;
11790
11791         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
11792         if (ret < 0)
11793                 return ret;
11794
11795         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
11796                 bool mode_changed = intel_crtc_needs_modeset(crtc_state);
11797
11798                 if (mode_changed || crtc_state->update_pipe ||
11799                     crtc_state->uapi.color_mgmt_changed) {
11800                         intel_dsb_prepare(crtc_state);
11801                 }
11802         }
11803
11804         return 0;
11805 }
11806
11807 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
11808                                   struct intel_crtc_state *crtc_state)
11809 {
11810         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11811
11812         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
11813                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
11814
11815         if (crtc_state->has_pch_encoder) {
11816                 enum pipe pch_transcoder =
11817                         intel_crtc_pch_transcoder(crtc);
11818
11819                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
11820         }
11821 }
11822
/*
 * Reprogram the pieces of pipe state that a fastset is allowed to touch:
 * pipe source size, panel fitter, linetime watermark and (gen11+) the
 * pipe chicken register. Must not require a full modeset.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* pre-gen9 pch platforms: enable or disable the pfit directly */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
11867
/*
 * Commit the per-pipe (non-plane) hardware state for @crtc.
 *
 * For full modesets the pipe configuration was already written as part
 * of the CRTC enable sequence, so only the fastset pieces (color
 * management, scalers, PIPEMISC, PSR2 manual tracking) are programmed
 * here; watermarks are updated for both paths.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
11902
/*
 * Enable @crtc if its new state requires a full modeset; a no-op for
 * fastsets. Pipe CRC is re-enabled once vblanks work again, except for
 * bigjoiner slaves which return early here — presumably handled via
 * the master; confirm against the bigjoiner enable sequence.
 */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return;

	intel_crtc_update_active_timings(new_crtc_state);

	dev_priv->display.crtc_enable(state, crtc);

	if (new_crtc_state->bigjoiner_slave)
		return;

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}
11923
/*
 * Update a single CRTC, covering both fastsets and the update phase of
 * full modesets. Plane updates and the pipe configuration are committed
 * inside the vblank evasion critical section delimited by
 * intel_pipe_update_start()/intel_pipe_update_end().
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* Fastset-only preparation: LUTs, pre-plane update, encoders. */
	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
11973
/*
 * Disable a CRTC as part of a modeset. Never called with a bigjoiner
 * slave's state directly (WARNed below); for a bigjoiner master the
 * linked slave's planes are disabled here as well.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
12013
/*
 * Disable every CRTC that needs a full modeset, in dependency order:
 * transcoder port sync slaves and MST slave transcoders first, then
 * the rest. @handled is a bitmask of pipes disabled in the first pass
 * so the second pass can skip them.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/* Bigjoiner slaves are handled through their master below. */
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
12066
12067 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
12068 {
12069         struct intel_crtc_state *new_crtc_state;
12070         struct intel_crtc *crtc;
12071         int i;
12072
12073         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
12074                 if (!new_crtc_state->hw.active)
12075                         continue;
12076
12077                 intel_enable_crtc(state, crtc);
12078                 intel_update_crtc(state, crtc);
12079         }
12080 }
12081
/*
 * skl+ commit_modeset_enables hook: orders pipe updates so that the DDB
 * allocations of active pipes never overlap during the transition
 * (overlapping allocations would cause underruns — see below), waiting
 * for a vblank between steps when a DDB change must settle first.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Classify pipes: fastset updates vs. full modesets. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer this pipe until its new DDB is conflict-free. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* Modeset pipes still need their plane updates, done last below. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* All pipes must have been handled by now. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
12203
12204 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12205 {
12206         struct intel_atomic_state *state, *next;
12207         struct llist_node *freed;
12208
12209         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12210         llist_for_each_entry_safe(state, next, freed, freed)
12211                 drm_atomic_state_put(&state->base);
12212 }
12213
12214 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12215 {
12216         struct drm_i915_private *dev_priv =
12217                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12218
12219         intel_atomic_helper_free_state(dev_priv);
12220 }
12221
/*
 * Sleep until the commit's ready fence signals. Waits on both the
 * commit_ready waitqueue and the I915_RESET_MODESET bit waitqueue so a
 * pending display-affecting GPU reset can also wake and terminate the
 * wait.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both waitqueues before checking either condition. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
12248
/*
 * Release the DSB of every old CRTC state in @state. Note that
 * intel_atomic_commit_tail() moved the new state's dsb pointer into the
 * old state before this runs; only the old states are consumed here.
 */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}
12259
12260 static void intel_atomic_cleanup_work(struct work_struct *work)
12261 {
12262         struct intel_atomic_state *state =
12263                 container_of(work, struct intel_atomic_state, base.commit_work);
12264         struct drm_i915_private *i915 = to_i915(state->base.dev);
12265
12266         intel_cleanup_dsbs(state);
12267         drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
12268         drm_atomic_helper_commit_cleanup_done(&state->base);
12269         drm_atomic_state_put(&state->base);
12270
12271         intel_atomic_helper_free_state(i915);
12272 }
12273
/*
 * Read back the fast clear color value for every plane whose fb uses
 * the gen12 RC CCS with clear color modifier, caching it in
 * plane_state->ccval.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int ret;

		if (!fb ||
		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* + 16 skips the 4 per-channel values to the native color. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[2] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
12310
/*
 * The tail of an atomic commit: performs the actual hardware
 * programming for @state. Runs either directly (blocking commits) or
 * from the commit worker (nonblocking commits), after all fences have
 * signaled. The statement order below is load-bearing throughout.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	/* Hold the modeset power domain across any full modeset. */
	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	/* Grab power domains for pipes being modeset or fastset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		/* Bump cdclk before enables if the new config needs more. */
		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	/* Arm flip-done interrupt handling for async flips. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		/* Drop cdclk after enables if the new config needs less. */
		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		/* Load LUTs post-flip unless they were preloaded earlier. */
		if (new_crtc_state->hw.active &&
		    !intel_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
12488
12489 static void intel_atomic_commit_work(struct work_struct *work)
12490 {
12491         struct intel_atomic_state *state =
12492                 container_of(work, struct intel_atomic_state, base.commit_work);
12493
12494         intel_atomic_commit_tail(state);
12495 }
12496
12497 static int __i915_sw_fence_call
12498 intel_atomic_commit_ready(struct i915_sw_fence *fence,
12499                           enum i915_sw_fence_notify notify)
12500 {
12501         struct intel_atomic_state *state =
12502                 container_of(fence, struct intel_atomic_state, commit_ready);
12503
12504         switch (notify) {
12505         case FENCE_COMPLETE:
12506                 /* we do blocking waits in the worker, nothing to do here */
12507                 break;
12508         case FENCE_FREE:
12509                 {
12510                         struct intel_atomic_helper *helper =
12511                                 &to_i915(state->base.dev)->atomic_helper;
12512
12513                         if (llist_add(&state->freed, &helper->free_list))
12514                                 schedule_work(&helper->free_work);
12515                         break;
12516                 }
12517         }
12518
12519         return NOTIFY_DONE;
12520 }
12521
/*
 * Move each plane's frontbuffer tracking bit from its old framebuffer
 * to its new one for this commit.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}
12534
/*
 * Top-level atomic commit entry point: prepare the state, swap it into
 * place, and then either run the commit tail synchronously (blocking
 * commit) or queue it on the appropriate workqueue (nonblocking).
 * Returns 0 on success or a negative error code, with all references
 * and the runtime-pm wakeref released on the error paths.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Hold runtime pm across the whole commit. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		/* Unblock the fence so the failed state can be torn down. */
		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Extra state reference for the deferred commit work. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking modesets must not overtake queued nonblocking ones. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
12625
/*
 * Bookkeeping for an RPS (GPU frequency) boost armed on a crtc's vblank
 * waitqueue: if the request hasn't started running by the vblank,
 * do_rps_boost() bumps the clocks.  Freed by the callback itself.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the crtc's vblank waitqueue */

	struct drm_crtc *crtc;		/* crtc whose vblank reference we hold */
	struct i915_request *request;	/* request the flip depends on (we hold a ref) */
};
12632
12633 static int do_rps_boost(struct wait_queue_entry *_wait,
12634                         unsigned mode, int sync, void *key)
12635 {
12636         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
12637         struct i915_request *rq = wait->request;
12638
12639         /*
12640          * If we missed the vblank, but the request is already running it
12641          * is reasonable to assume that it will complete before the next
12642          * vblank without our intervention, so leave RPS alone.
12643          */
12644         if (!i915_request_started(rq))
12645                 intel_rps_boost(rq);
12646         i915_request_put(rq);
12647
12648         drm_crtc_vblank_put(wait->crtc);
12649
12650         list_del(&wait->wait.entry);
12651         kfree(wait);
12652         return 1;
12653 }
12654
12655 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
12656                                        struct dma_fence *fence)
12657 {
12658         struct wait_rps_boost *wait;
12659
12660         if (!dma_fence_is_i915(fence))
12661                 return;
12662
12663         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
12664                 return;
12665
12666         if (drm_crtc_vblank_get(crtc))
12667                 return;
12668
12669         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
12670         if (!wait) {
12671                 drm_crtc_vblank_put(crtc);
12672                 return;
12673         }
12674
12675         wait->request = to_request(dma_fence_get(fence));
12676         wait->crtc = crtc;
12677
12678         wait->wait.func = do_rps_boost;
12679         wait->wait.flags = 0;
12680
12681         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
12682 }
12683
12684 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
12685 {
12686         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
12687         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
12688         struct drm_framebuffer *fb = plane_state->hw.fb;
12689         struct i915_vma *vma;
12690
12691         if (plane->id == PLANE_CURSOR &&
12692             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
12693                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12694                 const int align = intel_cursor_alignment(dev_priv);
12695                 int err;
12696
12697                 err = i915_gem_object_attach_phys(obj, align);
12698                 if (err)
12699                         return err;
12700         }
12701
12702         vma = intel_pin_and_fence_fb_obj(fb,
12703                                          &plane_state->view,
12704                                          intel_plane_uses_fence(plane_state),
12705                                          &plane_state->flags);
12706         if (IS_ERR(vma))
12707                 return PTR_ERR(vma);
12708
12709         plane_state->vma = vma;
12710
12711         return 0;
12712 }
12713
12714 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
12715 {
12716         struct i915_vma *vma;
12717
12718         vma = fetch_and_zero(&old_plane_state->vma);
12719         if (vma)
12720                 intel_unpin_fb_vma(vma, old_plane_state->flags);
12721 }
12722
/*
 * Bump the scheduling priority of outstanding rendering to a scanout
 * object so that work the display depends on completes sooner.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
12731
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @_plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.  Also wires the commit's sw fence up to the explicit or
 * implicit fences the flip must wait on, and arms an RPS boost for the
 * upcoming vblank.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *_plane,
		       struct drm_plane_state *_new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
	int ret;

	if (old_obj) {
		const struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state,
							to_intel_crtc(old_plane_state->hw.crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (intel_crtc_needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
						    new_plane_state->uapi.fence,
						    i915_fence_timeout(dev_priv),
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	/* Keep the backing pages resident while pinning the fb. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = intel_plane_pin_fb(new_plane_state);

	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&state->commit_ready,
						      obj->base.resv, NULL,
						      false,
						      i915_fence_timeout(dev_priv),
						      GFP_KERNEL);
		if (ret < 0)
			goto unpin_fb;

		fence = dma_resv_get_excl_rcu(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		state->rps_interactive = true;
	}

	return 0;

unpin_fb:
	intel_plane_unpin_fb(new_plane_state);

	return ret;
}
12853
12854 /**
12855  * intel_cleanup_plane_fb - Cleans up an fb after plane use
12856  * @plane: drm plane to clean up for
12857  * @_old_plane_state: the state from the previous modeset
12858  *
12859  * Cleans up a framebuffer that has just been removed from a plane.
12860  */
12861 void
12862 intel_cleanup_plane_fb(struct drm_plane *plane,
12863                        struct drm_plane_state *_old_plane_state)
12864 {
12865         struct intel_plane_state *old_plane_state =
12866                 to_intel_plane_state(_old_plane_state);
12867         struct intel_atomic_state *state =
12868                 to_intel_atomic_state(old_plane_state->uapi.state);
12869         struct drm_i915_private *dev_priv = to_i915(plane->dev);
12870         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
12871
12872         if (!obj)
12873                 return;
12874
12875         if (state->rps_interactive) {
12876                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
12877                 state->rps_interactive = false;
12878         }
12879
12880         /* Should only be called after a successful intel_prepare_plane_fb()! */
12881         intel_plane_unpin_fb(old_plane_state);
12882 }
12883
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
12896
12897 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
12898 {
12899         struct intel_plane *plane;
12900
12901         for_each_intel_plane(&dev_priv->drm, plane) {
12902                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
12903                                                                   plane->pipe);
12904
12905                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
12906         }
12907 }
12908
12909
12910 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
12911                                       struct drm_file *file)
12912 {
12913         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
12914         struct drm_crtc *drmmode_crtc;
12915         struct intel_crtc *crtc;
12916
12917         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
12918         if (!drmmode_crtc)
12919                 return -ENOENT;
12920
12921         crtc = to_intel_crtc(drmmode_crtc);
12922         pipe_from_crtc_id->pipe = crtc->pipe;
12923
12924         return 0;
12925 }
12926
12927 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
12928 {
12929         struct drm_device *dev = encoder->base.dev;
12930         struct intel_encoder *source_encoder;
12931         u32 possible_clones = 0;
12932
12933         for_each_intel_encoder(dev, source_encoder) {
12934                 if (encoders_cloneable(encoder, source_encoder))
12935                         possible_clones |= drm_encoder_mask(&source_encoder->base);
12936         }
12937
12938         return possible_clones;
12939 }
12940
12941 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
12942 {
12943         struct drm_device *dev = encoder->base.dev;
12944         struct intel_crtc *crtc;
12945         u32 possible_crtcs = 0;
12946
12947         for_each_intel_crtc(dev, crtc) {
12948                 if (encoder->pipe_mask & BIT(crtc->pipe))
12949                         possible_crtcs |= drm_crtc_mask(&crtc->base);
12950         }
12951
12952         return possible_crtcs;
12953 }
12954
12955 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
12956 {
12957         if (!IS_MOBILE(dev_priv))
12958                 return false;
12959
12960         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
12961                 return false;
12962
12963         if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
12964                 return false;
12965
12966         return true;
12967 }
12968
12969 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
12970 {
12971         if (INTEL_GEN(dev_priv) >= 9)
12972                 return false;
12973
12974         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
12975                 return false;
12976
12977         if (HAS_PCH_LPT_H(dev_priv) &&
12978             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
12979                 return false;
12980
12981         /* DDI E can't be used if DDI A requires 4 lanes */
12982         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
12983                 return false;
12984
12985         if (!dev_priv->vbt.int_crt_support)
12986                 return false;
12987
12988         return true;
12989 }
12990
/*
 * Probe and register all display outputs (DDI/DP/HDMI/LVDS/CRT/DSI/TV
 * encoders) for this platform.  The available ports and the detection
 * method (DDI straps, SFUSE_STRAP, SDVO/DP detect bits, VBT) differ per
 * generation, hence the platform ladder below; the registration order
 * within each branch is deliberate.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* With all encoders registered, compute their crtc and clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
13240
13241 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
13242 {
13243         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
13244
13245         drm_framebuffer_cleanup(fb);
13246         intel_frontbuffer_put(intel_fb->frontbuffer);
13247
13248         kfree(intel_fb);
13249 }
13250
13251 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
13252                                                 struct drm_file *file,
13253                                                 unsigned int *handle)
13254 {
13255         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13256         struct drm_i915_private *i915 = to_i915(obj->base.dev);
13257
13258         if (obj->userptr.mm) {
13259                 drm_dbg(&i915->drm,
13260                         "attempting to use a userptr for a framebuffer, denied\n");
13261                 return -EINVAL;
13262         }
13263
13264         return drm_gem_handle_create(file, &obj->base, handle);
13265 }
13266
13267 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
13268                                         struct drm_file *file,
13269                                         unsigned flags, unsigned color,
13270                                         struct drm_clip_rect *clips,
13271                                         unsigned num_clips)
13272 {
13273         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13274
13275         i915_gem_object_flush_if_display(obj);
13276         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
13277
13278         return 0;
13279 }
13280
/* Framebuffer vfuncs for fbs created through intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
13286
/*
 * Validate @mode_cmd against @obj and hardware limits, and initialize
 * @intel_fb around them.
 *
 * Takes a frontbuffer tracking reference on @obj, checks format/modifier,
 * stride, tiling and per-plane constraints, fills in the fb metadata and
 * registers the fb with the DRM core. On any failure the frontbuffer
 * reference is dropped via the err: label and a negative errno is
 * returned (-EINVAL for validation failures, -ENOMEM for allocation
 * failure, or whatever intel_fill_fb_info()/drm_framebuffer_init()
 * returned).
 *
 * Note: @mode_cmd may be modified (modifier[0] is patched in for the
 * legacy addfb path), so callers pass a mutable copy of userspace's cmd.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL; /* default error for all validation failures below */
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fence tiling/stride under the object lock. */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* The format/modifier combo must be accepted by at least one plane. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	/* Plane 0 pitch must not exceed the platform/format maximum. */
	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	/* Copy width/height/format/pitches/offsets into the fb struct. */
	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per color/aux plane validation. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must share one BO (single-object fbs only). */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* Gen12 CCS aux planes (except clear color) need an exact pitch. */
		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Undo the frontbuffer reference taken at the top. */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
13432
13433 static struct drm_framebuffer *
13434 intel_user_framebuffer_create(struct drm_device *dev,
13435                               struct drm_file *filp,
13436                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
13437 {
13438         struct drm_framebuffer *fb;
13439         struct drm_i915_gem_object *obj;
13440         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
13441
13442         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
13443         if (!obj)
13444                 return ERR_PTR(-ENOENT);
13445
13446         fb = intel_framebuffer_create(obj, &mode_cmd);
13447         i915_gem_object_put(obj);
13448
13449         return fb;
13450 }
13451
13452 static enum drm_mode_status
13453 intel_mode_valid(struct drm_device *dev,
13454                  const struct drm_display_mode *mode)
13455 {
13456         struct drm_i915_private *dev_priv = to_i915(dev);
13457         int hdisplay_max, htotal_max;
13458         int vdisplay_max, vtotal_max;
13459
13460         /*
13461          * Can't reject DBLSCAN here because Xorg ddxen can add piles
13462          * of DBLSCAN modes to the output's mode list when they detect
13463          * the scaling mode property on the connector. And they don't
13464          * ask the kernel to validate those modes in any way until
13465          * modeset time at which point the client gets a protocol error.
13466          * So in order to not upset those clients we silently ignore the
13467          * DBLSCAN flag on such connectors. For other connectors we will
13468          * reject modes with the DBLSCAN flag in encoder->compute_config().
13469          * And we always reject DBLSCAN modes in connector->mode_valid()
13470          * as we never want such modes on the connector's mode list.
13471          */
13472
13473         if (mode->vscan > 1)
13474                 return MODE_NO_VSCAN;
13475
13476         if (mode->flags & DRM_MODE_FLAG_HSKEW)
13477                 return MODE_H_ILLEGAL;
13478
13479         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
13480                            DRM_MODE_FLAG_NCSYNC |
13481                            DRM_MODE_FLAG_PCSYNC))
13482                 return MODE_HSYNC;
13483
13484         if (mode->flags & (DRM_MODE_FLAG_BCAST |
13485                            DRM_MODE_FLAG_PIXMUX |
13486                            DRM_MODE_FLAG_CLKDIV2))
13487                 return MODE_BAD;
13488
13489         /* Transcoder timing limits */
13490         if (INTEL_GEN(dev_priv) >= 11) {
13491                 hdisplay_max = 16384;
13492                 vdisplay_max = 8192;
13493                 htotal_max = 16384;
13494                 vtotal_max = 8192;
13495         } else if (INTEL_GEN(dev_priv) >= 9 ||
13496                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
13497                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
13498                 vdisplay_max = 4096;
13499                 htotal_max = 8192;
13500                 vtotal_max = 8192;
13501         } else if (INTEL_GEN(dev_priv) >= 3) {
13502                 hdisplay_max = 4096;
13503                 vdisplay_max = 4096;
13504                 htotal_max = 8192;
13505                 vtotal_max = 8192;
13506         } else {
13507                 hdisplay_max = 2048;
13508                 vdisplay_max = 2048;
13509                 htotal_max = 4096;
13510                 vtotal_max = 4096;
13511         }
13512
13513         if (mode->hdisplay > hdisplay_max ||
13514             mode->hsync_start > htotal_max ||
13515             mode->hsync_end > htotal_max ||
13516             mode->htotal > htotal_max)
13517                 return MODE_H_ILLEGAL;
13518
13519         if (mode->vdisplay > vdisplay_max ||
13520             mode->vsync_start > vtotal_max ||
13521             mode->vsync_end > vtotal_max ||
13522             mode->vtotal > vtotal_max)
13523                 return MODE_V_ILLEGAL;
13524
13525         if (INTEL_GEN(dev_priv) >= 5) {
13526                 if (mode->hdisplay < 64 ||
13527                     mode->htotal - mode->hdisplay < 32)
13528                         return MODE_H_ILLEGAL;
13529
13530                 if (mode->vtotal - mode->vdisplay < 5)
13531                         return MODE_V_ILLEGAL;
13532         } else {
13533                 if (mode->htotal - mode->hdisplay < 32)
13534                         return MODE_H_ILLEGAL;
13535
13536                 if (mode->vtotal - mode->vdisplay < 3)
13537                         return MODE_V_ILLEGAL;
13538         }
13539
13540         return MODE_OK;
13541 }
13542
13543 enum drm_mode_status
13544 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
13545                                 const struct drm_display_mode *mode,
13546                                 bool bigjoiner)
13547 {
13548         int plane_width_max, plane_height_max;
13549
13550         /*
13551          * intel_mode_valid() should be
13552          * sufficient on older platforms.
13553          */
13554         if (INTEL_GEN(dev_priv) < 9)
13555                 return MODE_OK;
13556
13557         /*
13558          * Most people will probably want a fullscreen
13559          * plane so let's not advertize modes that are
13560          * too big for that.
13561          */
13562         if (INTEL_GEN(dev_priv) >= 11) {
13563                 plane_width_max = 5120 << bigjoiner;
13564                 plane_height_max = 4320;
13565         } else {
13566                 plane_width_max = 5120;
13567                 plane_height_max = 4096;
13568         }
13569
13570         if (mode->hdisplay > plane_width_max)
13571                 return MODE_H_ILLEGAL;
13572
13573         if (mode->vdisplay > plane_height_max)
13574                 return MODE_V_ILLEGAL;
13575
13576         return MODE_OK;
13577 }
13578
/* Top-level mode config vfuncs installed by intel_mode_config_init(). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
13590
13591 /**
13592  * intel_init_display_hooks - initialize the display modesetting hooks
13593  * @dev_priv: device private
13594  */
13595 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
13596 {
13597         intel_init_cdclk_hooks(dev_priv);
13598
13599         intel_dpll_init_clock_hook(dev_priv);
13600
13601         if (INTEL_GEN(dev_priv) >= 9) {
13602                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
13603                 dev_priv->display.crtc_enable = hsw_crtc_enable;
13604                 dev_priv->display.crtc_disable = hsw_crtc_disable;
13605         } else if (HAS_DDI(dev_priv)) {
13606                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
13607                 dev_priv->display.crtc_enable = hsw_crtc_enable;
13608                 dev_priv->display.crtc_disable = hsw_crtc_disable;
13609         } else if (HAS_PCH_SPLIT(dev_priv)) {
13610                 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
13611                 dev_priv->display.crtc_enable = ilk_crtc_enable;
13612                 dev_priv->display.crtc_disable = ilk_crtc_disable;
13613         } else if (IS_CHERRYVIEW(dev_priv) ||
13614                    IS_VALLEYVIEW(dev_priv)) {
13615                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
13616                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
13617                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
13618         } else {
13619                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
13620                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
13621                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
13622         }
13623
13624         intel_fdi_init_hook(dev_priv);
13625
13626         if (INTEL_GEN(dev_priv) >= 9) {
13627                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
13628                 dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
13629         } else {
13630                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
13631                 dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
13632         }
13633
13634 }
13635
13636 void intel_modeset_init_hw(struct drm_i915_private *i915)
13637 {
13638         struct intel_cdclk_state *cdclk_state =
13639                 to_intel_cdclk_state(i915->cdclk.obj.state);
13640
13641         intel_update_cdclk(i915);
13642         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
13643         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
13644 }
13645
13646 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
13647 {
13648         struct drm_plane *plane;
13649         struct intel_crtc *crtc;
13650
13651         for_each_intel_crtc(state->dev, crtc) {
13652                 struct intel_crtc_state *crtc_state;
13653
13654                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
13655                 if (IS_ERR(crtc_state))
13656                         return PTR_ERR(crtc_state);
13657
13658                 if (crtc_state->hw.active) {
13659                         /*
13660                          * Preserve the inherited flag to avoid
13661                          * taking the full modeset path.
13662                          */
13663                         crtc_state->inherited = true;
13664                 }
13665         }
13666
13667         drm_for_each_plane(plane, state->dev) {
13668                 struct drm_plane_state *plane_state;
13669
13670                 plane_state = drm_atomic_get_plane_state(state, plane);
13671                 if (IS_ERR(plane_state))
13672                         return PTR_ERR(plane_state);
13673         }
13674
13675         return 0;
13676 }
13677
13678 /*
13679  * Calculate what we think the watermarks should be for the state we've read
13680  * out of the hardware and then immediately program those watermarks so that
13681  * we ensure the hardware settings match our internal state.
13682  *
13683  * We can calculate what we think WM's should be by creating a duplicate of the
13684  * current state (which was constructed during hardware readout) and running it
13685  * through the atomic check code to calculate new watermark values in the
13686  * state object.
13687  */
13688 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
13689 {
13690         struct drm_atomic_state *state;
13691         struct intel_atomic_state *intel_state;
13692         struct intel_crtc *crtc;
13693         struct intel_crtc_state *crtc_state;
13694         struct drm_modeset_acquire_ctx ctx;
13695         int ret;
13696         int i;
13697
13698         /* Only supported on platforms that use atomic watermark design */
13699         if (!dev_priv->display.optimize_watermarks)
13700                 return;
13701
13702         state = drm_atomic_state_alloc(&dev_priv->drm);
13703         if (drm_WARN_ON(&dev_priv->drm, !state))
13704                 return;
13705
13706         intel_state = to_intel_atomic_state(state);
13707
13708         drm_modeset_acquire_init(&ctx, 0);
13709
13710 retry:
13711         state->acquire_ctx = &ctx;
13712
13713         /*
13714          * Hardware readout is the only time we don't want to calculate
13715          * intermediate watermarks (since we don't trust the current
13716          * watermarks).
13717          */
13718         if (!HAS_GMCH(dev_priv))
13719                 intel_state->skip_intermediate_wm = true;
13720
13721         ret = sanitize_watermarks_add_affected(state);
13722         if (ret)
13723                 goto fail;
13724
13725         ret = intel_atomic_check(&dev_priv->drm, state);
13726         if (ret)
13727                 goto fail;
13728
13729         /* Write calculated watermark values back */
13730         for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
13731                 crtc_state->wm.need_postvbl_update = true;
13732                 dev_priv->display.optimize_watermarks(intel_state, crtc);
13733
13734                 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
13735         }
13736
13737 fail:
13738         if (ret == -EDEADLK) {
13739                 drm_atomic_state_clear(state);
13740                 drm_modeset_backoff(&ctx);
13741                 goto retry;
13742         }
13743
13744         /*
13745          * If we fail here, it means that the hardware appears to be
13746          * programmed in a way that shouldn't be possible, given our
13747          * understanding of watermark requirements.  This might mean a
13748          * mistake in the hardware readout code or a mistake in the
13749          * watermark calculations for a given platform.  Raise a WARN
13750          * so that this is noticeable.
13751          *
13752          * If this actually happens, we'll have to just leave the
13753          * BIOS-programmed watermarks untouched and hope for the best.
13754          */
13755         drm_WARN(&dev_priv->drm, ret,
13756                  "Could not determine valid watermarks for inherited state\n");
13757
13758         drm_atomic_state_put(state);
13759
13760         drm_modeset_drop_locks(&ctx);
13761         drm_modeset_acquire_fini(&ctx);
13762 }
13763
13764 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
13765 {
13766         if (IS_GEN(dev_priv, 5)) {
13767                 u32 fdi_pll_clk =
13768                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
13769
13770                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
13771         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
13772                 dev_priv->fdi_pll_freq = 270000;
13773         } else {
13774                 return;
13775         }
13776
13777         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
13778 }
13779
/*
 * Perform the first atomic commit after hardware state readout, re-committing
 * the inherited state of every active CRTC so software and hardware agree.
 * Returns 0 or a negative errno; retries internally on -EDEADLK.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

/* Re-entered after an -EDEADLK backoff below, with the state cleared. */
retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * If any encoder reports its inherited state can't be
			 * kept as-is, pull its connectors into the commit so
			 * the state can be recomputed for them.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard modeset-lock backoff: drop contended locks and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
13858
13859 static void intel_mode_config_init(struct drm_i915_private *i915)
13860 {
13861         struct drm_mode_config *mode_config = &i915->drm.mode_config;
13862
13863         drm_mode_config_init(&i915->drm);
13864         INIT_LIST_HEAD(&i915->global_obj_list);
13865
13866         mode_config->min_width = 0;
13867         mode_config->min_height = 0;
13868
13869         mode_config->preferred_depth = 24;
13870         mode_config->prefer_shadow = 1;
13871
13872         mode_config->allow_fb_modifiers = true;
13873
13874         mode_config->funcs = &intel_mode_funcs;
13875
13876         mode_config->async_page_flip = has_async_flips(i915);
13877
13878         /*
13879          * Maximum framebuffer dimensions, chosen to match
13880          * the maximum render engine surface size on gen4+.
13881          */
13882         if (INTEL_GEN(i915) >= 7) {
13883                 mode_config->max_width = 16384;
13884                 mode_config->max_height = 16384;
13885         } else if (INTEL_GEN(i915) >= 4) {
13886                 mode_config->max_width = 8192;
13887                 mode_config->max_height = 8192;
13888         } else if (IS_GEN(i915, 3)) {
13889                 mode_config->max_width = 4096;
13890                 mode_config->max_height = 4096;
13891         } else {
13892                 mode_config->max_width = 2048;
13893                 mode_config->max_height = 2048;
13894         }
13895
13896         if (IS_I845G(i915) || IS_I865G(i915)) {
13897                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
13898                 mode_config->cursor_height = 1023;
13899         } else if (IS_I830(i915) || IS_I85X(i915) ||
13900                    IS_I915G(i915) || IS_I915GM(i915)) {
13901                 mode_config->cursor_width = 64;
13902                 mode_config->cursor_height = 64;
13903         } else {
13904                 mode_config->cursor_width = 256;
13905                 mode_config->cursor_height = 256;
13906         }
13907 }
13908
/* Tear down global atomic state objects, then the core mode config. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
13914
13915 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
13916 {
13917         if (plane_config->fb) {
13918                 struct drm_framebuffer *fb = &plane_config->fb->base;
13919
13920                 /* We may only have the stub and not a full framebuffer */
13921                 if (drm_framebuffer_read_refcount(fb))
13922                         drm_framebuffer_put(fb);
13923                 else
13924                         kfree(fb);
13925         }
13926
13927         if (plane_config->vma)
13928                 i915_vma_put(plane_config->vma);
13929 }
13930
/*
 * part #1: call before irq install
 *
 * First stage of display initialization: vblank, VBT/bios, VGA, power
 * domains, CSR firmware, workqueues, mode config and the cdclk/dbuf/bw
 * global atomic state objects. Returns 0 or a negative errno, unwinding
 * via the cleanup labels on failure.
 */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	intel_mode_config_init(i915);

	/*
	 * NOTE(review): the three global-obj inits below all share one
	 * cleanup label and don't unwind each other; presumably their
	 * state is torn down by intel_mode_config_cleanup() — confirm.
	 */
	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
13996
/*
 * part #2: call after irq install, but before gem init.
 *
 * Creates the CRTCs/planes, brings up initial display hw (PLLs, cdclk),
 * creates the outputs, and then reads out the hardware state left by the
 * BIOS (including reserving the BIOS framebuffer) so the first modeset
 * can take over smoothly.
 *
 * Returns 0 on success or a negative error code from crtc creation.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				/* Tears down any crtcs created so far. */
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Hardware state readout happens under the full modeset lock. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		/* Only pipes left active by the BIOS have a fb to inherit. */
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
14088
/*
 * part #3: call after gem init.
 *
 * Performs the initial atomic commit (so plane state is fully computed
 * before the first userspace modeset), sets up the overlay and fbdev,
 * and only then enables hotplug handling and IPC.
 *
 * Returns 0 on success, or a negative error code from fbdev init.
 * Note a failed initial commit is only logged, not treated as fatal.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
14121
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 mode.
 * Used for the "force pipe on" quirk (see the debug message below);
 * the exact register write ordering here is required by the hardware.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: these dividers with a 48 MHz ref must give ~25154 kHz */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Fixed 640x480 timings (values are end-exclusive minus one). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe is actually running before returning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
14194
/*
 * Counterpart of i830_enable_pipe(): shut down a pipe that was
 * force-enabled by the quirk and put its DPLL back into VGA mode.
 * All planes and cursors are expected to be off already; the WARNs
 * below verify that before the pipe itself is disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Wait for the pipe to actually stop before touching the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
14224
14225 static void
14226 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
14227 {
14228         struct intel_crtc *crtc;
14229
14230         if (INTEL_GEN(dev_priv) >= 4)
14231                 return;
14232
14233         for_each_intel_crtc(&dev_priv->drm, crtc) {
14234                 struct intel_plane *plane =
14235                         to_intel_plane(crtc->base.primary);
14236                 struct intel_crtc *plane_crtc;
14237                 enum pipe pipe;
14238
14239                 if (!plane->get_hw_state(plane, &pipe))
14240                         continue;
14241
14242                 if (pipe == crtc->pipe)
14243                         continue;
14244
14245                 drm_dbg_kms(&dev_priv->drm,
14246                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
14247                             plane->base.base.id, plane->base.name);
14248
14249                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14250                 intel_plane_disable_noatomic(plane_crtc, plane);
14251         }
14252 }
14253
14254 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
14255 {
14256         struct drm_device *dev = crtc->base.dev;
14257         struct intel_encoder *encoder;
14258
14259         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
14260                 return true;
14261
14262         return false;
14263 }
14264
14265 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
14266 {
14267         struct drm_device *dev = encoder->base.dev;
14268         struct intel_connector *connector;
14269
14270         for_each_connector_on_encoder(dev, &encoder->base, connector)
14271                 return connector;
14272
14273         return NULL;
14274 }
14275
14276 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
14277                               enum pipe pch_transcoder)
14278 {
14279         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
14280                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
14281 }
14282
/*
 * Reprogram the frame start delay (a debug knob the BIOS may have left
 * at a non-default value) back to our expected framestart_delay, on the
 * CPU transcoder and, for PCH-connected pipes, on the PCH transcoder.
 * Each branch is a read-modify-write of the platform-specific register.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: delay lives in the per-transcoder CHICKEN_TRANS register */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register, skip them */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms: delay is a field in PIPECONF */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	/* Also fix up the PCH transcoder's copy of the delay */
	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
14333
/*
 * Sanitize one crtc after hardware state readout: clear leftover BIOS
 * debug settings, turn off all non-primary planes, disable the crtc if
 * it has no active encoders, and initialize our fifo underrun reporting
 * bookkeeping to match reality.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
14400
14401 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
14402 {
14403         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
14404
14405         /*
14406          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
14407          * the hardware when a high res displays plugged in. DPLL P
14408          * divider is zero, and the pipe timings are bonkers. We'll
14409          * try to disable everything in that case.
14410          *
14411          * FIXME would be nice to be able to sanitize this state
14412          * without several WARNs, but for now let's take the easy
14413          * road.
14414          */
14415         return IS_GEN(dev_priv, 6) &&
14416                 crtc_state->hw.active &&
14417                 crtc_state->shared_dpll &&
14418                 crtc_state->port_clock == 0;
14419 }
14420
/*
 * Sanitize one encoder after hardware state readout: if the encoder has
 * an active connector but no active pipe (typical fallout of resume
 * register restore), manually run the encoder's disable hooks and clamp
 * the connector state to off. Also notifies opregion of the final state.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		/* Treat the pipe as inactive so the disable path below runs. */
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* Restore the temporarily swapped best_encoder. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
14491
14492 /* FIXME read out full plane state for all planes */
14493 static void readout_plane_state(struct drm_i915_private *dev_priv)
14494 {
14495         struct intel_plane *plane;
14496         struct intel_crtc *crtc;
14497
14498         for_each_intel_plane(&dev_priv->drm, plane) {
14499                 struct intel_plane_state *plane_state =
14500                         to_intel_plane_state(plane->base.state);
14501                 struct intel_crtc_state *crtc_state;
14502                 enum pipe pipe = PIPE_A;
14503                 bool visible;
14504
14505                 visible = plane->get_hw_state(plane, &pipe);
14506
14507                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14508                 crtc_state = to_intel_crtc_state(crtc->base.state);
14509
14510                 intel_set_plane_visible(crtc_state, plane_state, visible);
14511
14512                 drm_dbg_kms(&dev_priv->drm,
14513                             "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
14514                             plane->base.base.id, plane->base.name,
14515                             enableddisabled(visible), pipe_name(pipe));
14516         }
14517
14518         for_each_intel_crtc(&dev_priv->drm, crtc) {
14519                 struct intel_crtc_state *crtc_state =
14520                         to_intel_crtc_state(crtc->base.state);
14521
14522                 fixup_plane_bitmasks(crtc_state);
14523         }
14524 }
14525
14526 static void intel_modeset_readout_hw_state(struct drm_device *dev)
14527 {
14528         struct drm_i915_private *dev_priv = to_i915(dev);
14529         struct intel_cdclk_state *cdclk_state =
14530                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
14531         struct intel_dbuf_state *dbuf_state =
14532                 to_intel_dbuf_state(dev_priv->dbuf.obj.state);
14533         enum pipe pipe;
14534         struct intel_crtc *crtc;
14535         struct intel_encoder *encoder;
14536         struct intel_connector *connector;
14537         struct drm_connector_list_iter conn_iter;
14538         u8 active_pipes = 0;
14539
14540         for_each_intel_crtc(dev, crtc) {
14541                 struct intel_crtc_state *crtc_state =
14542                         to_intel_crtc_state(crtc->base.state);
14543
14544                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
14545                 intel_crtc_free_hw_state(crtc_state);
14546                 intel_crtc_state_reset(crtc_state, crtc);
14547
14548                 intel_crtc_get_pipe_config(crtc_state);
14549
14550                 crtc_state->hw.enable = crtc_state->hw.active;
14551
14552                 crtc->base.enabled = crtc_state->hw.enable;
14553                 crtc->active = crtc_state->hw.active;
14554
14555                 if (crtc_state->hw.active)
14556                         active_pipes |= BIT(crtc->pipe);
14557
14558                 drm_dbg_kms(&dev_priv->drm,
14559                             "[CRTC:%d:%s] hw state readout: %s\n",
14560                             crtc->base.base.id, crtc->base.name,
14561                             enableddisabled(crtc_state->hw.active));
14562         }
14563
14564         dev_priv->active_pipes = cdclk_state->active_pipes =
14565                 dbuf_state->active_pipes = active_pipes;
14566
14567         readout_plane_state(dev_priv);
14568
14569         intel_dpll_readout_hw_state(dev_priv);
14570
14571         for_each_intel_encoder(dev, encoder) {
14572                 pipe = 0;
14573
14574                 if (encoder->get_hw_state(encoder, &pipe)) {
14575                         struct intel_crtc_state *crtc_state;
14576
14577                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14578                         crtc_state = to_intel_crtc_state(crtc->base.state);
14579
14580                         encoder->base.crtc = &crtc->base;
14581                         intel_encoder_get_config(encoder, crtc_state);
14582                         if (encoder->sync_state)
14583                                 encoder->sync_state(encoder, crtc_state);
14584
14585                         /* read out to slave crtc as well for bigjoiner */
14586                         if (crtc_state->bigjoiner) {
14587                                 /* encoder should read be linked to bigjoiner master */
14588                                 WARN_ON(crtc_state->bigjoiner_slave);
14589
14590                                 crtc = crtc_state->bigjoiner_linked_crtc;
14591                                 crtc_state = to_intel_crtc_state(crtc->base.state);
14592                                 intel_encoder_get_config(encoder, crtc_state);
14593                         }
14594                 } else {
14595                         encoder->base.crtc = NULL;
14596                 }
14597
14598                 drm_dbg_kms(&dev_priv->drm,
14599                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
14600                             encoder->base.base.id, encoder->base.name,
14601                             enableddisabled(encoder->base.crtc),
14602                             pipe_name(pipe));
14603         }
14604
14605         drm_connector_list_iter_begin(dev, &conn_iter);
14606         for_each_intel_connector_iter(connector, &conn_iter) {
14607                 if (connector->get_hw_state(connector)) {
14608                         struct intel_crtc_state *crtc_state;
14609                         struct intel_crtc *crtc;
14610
14611                         connector->base.dpms = DRM_MODE_DPMS_ON;
14612
14613                         encoder = intel_attached_encoder(connector);
14614                         connector->base.encoder = &encoder->base;
14615
14616                         crtc = to_intel_crtc(encoder->base.crtc);
14617                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
14618
14619                         if (crtc_state && crtc_state->hw.active) {
14620                                 /*
14621                                  * This has to be done during hardware readout
14622                                  * because anything calling .crtc_disable may
14623                                  * rely on the connector_mask being accurate.
14624                                  */
14625                                 crtc_state->uapi.connector_mask |=
14626                                         drm_connector_mask(&connector->base);
14627                                 crtc_state->uapi.encoder_mask |=
14628                                         drm_encoder_mask(&encoder->base);
14629                         }
14630                 } else {
14631                         connector->base.dpms = DRM_MODE_DPMS_OFF;
14632                         connector->base.encoder = NULL;
14633                 }
14634                 drm_dbg_kms(&dev_priv->drm,
14635                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
14636                             connector->base.base.id, connector->base.name,
14637                             enableddisabled(connector->base.encoder));
14638         }
14639         drm_connector_list_iter_end(&conn_iter);
14640
14641         for_each_intel_crtc(dev, crtc) {
14642                 struct intel_bw_state *bw_state =
14643                         to_intel_bw_state(dev_priv->bw_obj.state);
14644                 struct intel_crtc_state *crtc_state =
14645                         to_intel_crtc_state(crtc->base.state);
14646                 struct intel_plane *plane;
14647                 int min_cdclk = 0;
14648
14649                 if (crtc_state->bigjoiner_slave)
14650                         continue;
14651
14652                 if (crtc_state->hw.active) {
14653                         /*
14654                          * The initial mode needs to be set in order to keep
14655                          * the atomic core happy. It wants a valid mode if the
14656                          * crtc's enabled, so we do the above call.
14657                          *
14658                          * But we don't set all the derived state fully, hence
14659                          * set a flag to indicate that a full recalculation is
14660                          * needed on the next commit.
14661                          */
14662                         crtc_state->inherited = true;
14663
14664                         intel_crtc_update_active_timings(crtc_state);
14665
14666                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
14667                 }
14668
14669                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14670                         const struct intel_plane_state *plane_state =
14671                                 to_intel_plane_state(plane->base.state);
14672
14673                         /*
14674                          * FIXME don't have the fb yet, so can't
14675                          * use intel_plane_data_rate() :(
14676                          */
14677                         if (plane_state->uapi.visible)
14678                                 crtc_state->data_rate[plane->id] =
14679                                         4 * crtc_state->pixel_rate;
14680                         /*
14681                          * FIXME don't have the fb yet, so can't
14682                          * use plane->min_cdclk() :(
14683                          */
14684                         if (plane_state->uapi.visible && plane->min_cdclk) {
14685                                 if (crtc_state->double_wide ||
14686                                     INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
14687                                         crtc_state->min_cdclk[plane->id] =
14688                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
14689                                 else
14690                                         crtc_state->min_cdclk[plane->id] =
14691                                                 crtc_state->pixel_rate;
14692                         }
14693                         drm_dbg_kms(&dev_priv->drm,
14694                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
14695                                     plane->base.base.id, plane->base.name,
14696                                     crtc_state->min_cdclk[plane->id]);
14697                 }
14698
14699                 if (crtc_state->hw.active) {
14700                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
14701                         if (drm_WARN_ON(dev, min_cdclk < 0))
14702                                 min_cdclk = 0;
14703                 }
14704
14705                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
14706                 cdclk_state->min_voltage_level[crtc->pipe] =
14707                         crtc_state->min_voltage_level;
14708
14709                 intel_bw_crtc_update(bw_state, crtc_state);
14710
14711                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
14712
14713                 /* discard our incomplete slave state, copy it from master */
14714                 if (crtc_state->bigjoiner && crtc_state->hw.active) {
14715                         struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
14716                         struct intel_crtc_state *slave_crtc_state =
14717                                 to_intel_crtc_state(slave->base.state);
14718
14719                         copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
14720                         slave->base.mode = crtc->base.mode;
14721
14722                         cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
14723                         cdclk_state->min_voltage_level[slave->pipe] =
14724                                 crtc_state->min_voltage_level;
14725
14726                         for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
14727                                 const struct intel_plane_state *plane_state =
14728                                         to_intel_plane_state(plane->base.state);
14729
14730                                 /*
14731                                  * FIXME don't have the fb yet, so can't
14732                                  * use intel_plane_data_rate() :(
14733                                  */
14734                                 if (plane_state->uapi.visible)
14735                                         crtc_state->data_rate[plane->id] =
14736                                                 4 * crtc_state->pixel_rate;
14737                                 else
14738                                         crtc_state->data_rate[plane->id] = 0;
14739                         }
14740
14741                         intel_bw_crtc_update(bw_state, slave_crtc_state);
14742                         drm_calc_timestamping_constants(&slave->base,
14743                                                         &slave_crtc_state->hw.adjusted_mode);
14744                 }
14745         }
14746 }
14747
14748 static void
14749 get_encoder_power_domains(struct drm_i915_private *dev_priv)
14750 {
14751         struct intel_encoder *encoder;
14752
14753         for_each_intel_encoder(&dev_priv->drm, encoder) {
14754                 struct intel_crtc_state *crtc_state;
14755
14756                 if (!encoder->get_power_domains)
14757                         continue;
14758
14759                 /*
14760                  * MST-primary and inactive encoders don't have a crtc state
14761                  * and neither of these require any power domain references.
14762                  */
14763                 if (!encoder->base.crtc)
14764                         continue;
14765
14766                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
14767                 encoder->get_power_domains(encoder, crtc_state);
14768         }
14769 }
14770
14771 static void intel_early_display_was(struct drm_i915_private *dev_priv)
14772 {
14773         /*
14774          * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
14775          * Also known as Wa_14010480278.
14776          */
14777         if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
14778                 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
14779                                intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
14780
14781         if (IS_HASWELL(dev_priv)) {
14782                 /*
14783                  * WaRsPkgCStateDisplayPMReq:hsw
14784                  * System hang if this isn't done before disabling all planes!
14785                  */
14786                 intel_de_write(dev_priv, CHICKEN_PAR1_1,
14787                                intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
14788         }
14789
14790         if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
14791                 /* Display WA #1142:kbl,cfl,cml */
14792                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
14793                              KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
14794                 intel_de_rmw(dev_priv, CHICKEN_MISC_2,
14795                              KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
14796                              KBL_ARB_FILL_SPARE_14);
14797         }
14798 }
14799
14800 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
14801                                        enum port port, i915_reg_t hdmi_reg)
14802 {
14803         u32 val = intel_de_read(dev_priv, hdmi_reg);
14804
14805         if (val & SDVO_ENABLE ||
14806             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
14807                 return;
14808
14809         drm_dbg_kms(&dev_priv->drm,
14810                     "Sanitizing transcoder select for HDMI %c\n",
14811                     port_name(port));
14812
14813         val &= ~SDVO_PIPE_SEL_MASK;
14814         val |= SDVO_PIPE_SEL(PIPE_A);
14815
14816         intel_de_write(dev_priv, hdmi_reg, val);
14817 }
14818
14819 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
14820                                      enum port port, i915_reg_t dp_reg)
14821 {
14822         u32 val = intel_de_read(dev_priv, dp_reg);
14823
14824         if (val & DP_PORT_EN ||
14825             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
14826                 return;
14827
14828         drm_dbg_kms(&dev_priv->drm,
14829                     "Sanitizing transcoder select for DP %c\n",
14830                     port_name(port));
14831
14832         val &= ~DP_PIPE_SEL_MASK;
14833         val |= DP_PIPE_SEL(PIPE_A);
14834
14835         intel_de_write(dev_priv, dp_reg, val);
14836 }
14837
/*
 * Reset the transcoder select of all disabled IBX PCH DP/HDMI ports
 * back to transcoder A.
 */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
14860
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Hold POWER_DOMAIN_INIT across the whole readout + sanitize pass. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Apply early display workarounds before reading out HW state. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and where supported, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/*
		 * After sanitizing no crtc should still hold extra power
		 * domain references; warn and drop any that remain.
		 */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
14950
14951 void intel_display_resume(struct drm_device *dev)
14952 {
14953         struct drm_i915_private *dev_priv = to_i915(dev);
14954         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
14955         struct drm_modeset_acquire_ctx ctx;
14956         int ret;
14957
14958         dev_priv->modeset_restore_state = NULL;
14959         if (state)
14960                 state->acquire_ctx = &ctx;
14961
14962         drm_modeset_acquire_init(&ctx, 0);
14963
14964         while (1) {
14965                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
14966                 if (ret != -EDEADLK)
14967                         break;
14968
14969                 drm_modeset_backoff(&ctx);
14970         }
14971
14972         if (!ret)
14973                 ret = __intel_display_resume(dev, state, &ctx);
14974
14975         intel_enable_ipc(dev_priv);
14976         drm_modeset_drop_locks(&ctx);
14977         drm_modeset_acquire_fini(&ctx);
14978
14979         if (ret)
14980                 drm_err(&dev_priv->drm,
14981                         "Restoring old state failed with %i\n", ret);
14982         if (state)
14983                 drm_atomic_state_put(state);
14984 }
14985
14986 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
14987 {
14988         struct intel_connector *connector;
14989         struct drm_connector_list_iter conn_iter;
14990
14991         /* Kill all the work that may have been queued by hpd. */
14992         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
14993         for_each_intel_connector_iter(connector, &conn_iter) {
14994                 if (connector->modeset_retry_work.func)
14995                         cancel_work_sync(&connector->modeset_retry_work);
14996                 if (connector->hdcp.shim) {
14997                         cancel_delayed_work_sync(&connector->hdcp.check_work);
14998                         cancel_work_sync(&connector->hdcp.prop_work);
14999                 }
15000         }
15001         drm_connector_list_iter_end(&conn_iter);
15002 }
15003
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Let any queued flips and modesets finish before teardown. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* Drain the deferred atomic-state free list; it must end up empty. */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
15013
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* The workqueues were flushed in part #1; now they can be destroyed. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
15053
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	/* Release the CSR/DMC firmware. */
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
15065
15066 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
15067
/*
 * Snapshot of key display registers captured at error time, filled in by
 * intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW/BDW only */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never filled by the capture path in this file */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false when the pipe's power domain was off at capture time */
		bool power_domain_on;
		u32 source;
		u32 stat;	/* GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;	/* gen <= 3 only */
		u32 pos;	/* gen <= 3 only */
		u32 addr;	/* gen <= 7, except HSW */
		u32 surface;	/* gen >= 4 only */
		u32 tile_offset;	/* gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;	/* transcoder exists on this platform */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];	/* size must match transcoders[] in the capture function */
};
15110
/*
 * Capture a snapshot of the display hardware state for the error dump.
 * Uses GFP_ATOMIC, so it is safe to call from atomic context. Returns
 * NULL when the device has no display or the allocation fails.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* error->transcoder[] is indexed by this table; keep them in sync. */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Skip pipes whose power domain is off; leave zeros. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	/* Capture timings for each transcoder present and powered on. */
	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}
15206
/* Shorthand for appending to the error state dump buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15208
15209 void
15210 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
15211                                 struct intel_display_error_state *error)
15212 {
15213         struct drm_i915_private *dev_priv = m->i915;
15214         int i;
15215
15216         if (!error)
15217                 return;
15218
15219         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
15220         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
15221                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
15222                            error->power_well_driver);
15223         for_each_pipe(dev_priv, i) {
15224                 err_printf(m, "Pipe [%d]:\n", i);
15225                 err_printf(m, "  Power: %s\n",
15226                            onoff(error->pipe[i].power_domain_on));
15227                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
15228                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
15229
15230                 err_printf(m, "Plane [%d]:\n", i);
15231                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
15232                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
15233                 if (INTEL_GEN(dev_priv) <= 3) {
15234                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
15235                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
15236                 }
15237                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
15238                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
15239                 if (INTEL_GEN(dev_priv) >= 4) {
15240                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
15241                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
15242                 }
15243
15244                 err_printf(m, "Cursor [%d]:\n", i);
15245                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
15246                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
15247                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
15248         }
15249
15250         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
15251                 if (!error->transcoder[i].available)
15252                         continue;
15253
15254                 err_printf(m, "CPU transcoder: %s\n",
15255                            transcoder_name(error->transcoder[i].cpu_transcoder));
15256                 err_printf(m, "  Power: %s\n",
15257                            onoff(error->transcoder[i].power_domain_on));
15258                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
15259                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
15260                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
15261                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
15262                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
15263                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
15264                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
15265         }
15266 }
15267
15268 #endif