Merge tag 'drm-intel-next-2023-03-07' of git://anongit.freedesktop.org/drm/drm-intel...
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dma-resv.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/slab.h>
33 #include <linux/string_helpers.h>
34 #include <linux/vga_switcheroo.h>
35 #include <acpi/video.h>
36
37 #include <drm/display/drm_dp_helper.h>
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_atomic_uapi.h>
41 #include <drm/drm_damage_helper.h>
42 #include <drm/drm_edid.h>
43 #include <drm/drm_fourcc.h>
44 #include <drm/drm_privacy_screen_consumer.h>
45 #include <drm/drm_probe_helper.h>
46 #include <drm/drm_rect.h>
47
48 #include "gem/i915_gem_lmem.h"
49 #include "gem/i915_gem_object.h"
50
51 #include "g4x_dp.h"
52 #include "g4x_hdmi.h"
53 #include "hsw_ips.h"
54 #include "i915_drv.h"
55 #include "i915_reg.h"
56 #include "i915_utils.h"
57 #include "i9xx_plane.h"
58 #include "i9xx_wm.h"
59 #include "icl_dsi.h"
60 #include "intel_acpi.h"
61 #include "intel_atomic.h"
62 #include "intel_atomic_plane.h"
63 #include "intel_audio.h"
64 #include "intel_bw.h"
65 #include "intel_cdclk.h"
66 #include "intel_color.h"
67 #include "intel_crt.h"
68 #include "intel_crtc.h"
69 #include "intel_crtc_state_dump.h"
70 #include "intel_ddi.h"
71 #include "intel_de.h"
72 #include "intel_display_debugfs.h"
73 #include "intel_display_power.h"
74 #include "intel_display_types.h"
75 #include "intel_dmc.h"
76 #include "intel_dp.h"
77 #include "intel_dp_link_training.h"
78 #include "intel_dp_mst.h"
79 #include "intel_dpio_phy.h"
80 #include "intel_dpll.h"
81 #include "intel_dpll_mgr.h"
82 #include "intel_dpt.h"
83 #include "intel_drrs.h"
84 #include "intel_dsi.h"
85 #include "intel_dvo.h"
86 #include "intel_fb.h"
87 #include "intel_fbc.h"
88 #include "intel_fbdev.h"
89 #include "intel_fdi.h"
90 #include "intel_fifo_underrun.h"
91 #include "intel_frontbuffer.h"
92 #include "intel_gmbus.h"
93 #include "intel_hdcp.h"
94 #include "intel_hdmi.h"
95 #include "intel_hotplug.h"
96 #include "intel_hti.h"
97 #include "intel_lvds.h"
98 #include "intel_lvds_regs.h"
99 #include "intel_modeset_setup.h"
100 #include "intel_modeset_verify.h"
101 #include "intel_overlay.h"
102 #include "intel_panel.h"
103 #include "intel_pch_display.h"
104 #include "intel_pch_refclk.h"
105 #include "intel_pcode.h"
106 #include "intel_pipe_crc.h"
107 #include "intel_plane_initial.h"
108 #include "intel_pm.h"
109 #include "intel_pps.h"
110 #include "intel_psr.h"
111 #include "intel_quirks.h"
112 #include "intel_sdvo.h"
113 #include "intel_snps_phy.h"
114 #include "intel_sprite.h"
115 #include "intel_tc.h"
116 #include "intel_tv.h"
117 #include "intel_vblank.h"
118 #include "intel_vdsc.h"
119 #include "intel_vdsc_regs.h"
120 #include "intel_vga.h"
121 #include "intel_vrr.h"
122 #include "intel_wm.h"
123 #include "skl_scaler.h"
124 #include "skl_universal_plane.h"
125 #include "skl_watermark.h"
126 #include "vlv_dsi.h"
127 #include "vlv_dsi_pll.h"
128 #include "vlv_dsi_regs.h"
129 #include "vlv_sideband.h"
130
131 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
132 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
133 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
134 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
135 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
136
137 /* returns HPLL frequency in kHz */
138 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
139 {
140         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
141
142         /* Obtain SKU information */
143         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
144                 CCK_FUSE_HPLL_FREQ_MASK;
145
146         return vco_freq[hpll_freq] * 1000;
147 }
148
149 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
150                       const char *name, u32 reg, int ref_freq)
151 {
152         u32 val;
153         int divider;
154
155         val = vlv_cck_read(dev_priv, reg);
156         divider = val & CCK_FREQUENCY_VALUES;
157
158         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
159                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
160                  "%s change in progress\n", name);
161
162         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
163 }
164
165 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
166                            const char *name, u32 reg)
167 {
168         int hpll;
169
170         vlv_cck_get(dev_priv);
171
172         if (dev_priv->hpll_freq == 0)
173                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
174
175         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
176
177         vlv_cck_put(dev_priv);
178
179         return hpll;
180 }
181
182 static void intel_update_czclk(struct drm_i915_private *dev_priv)
183 {
184         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
185                 return;
186
187         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
188                                                       CCK_CZ_CLOCK_CONTROL);
189
190         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
191                 dev_priv->czclk_freq);
192 }
193
194 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
195 {
196         return (crtc_state->active_planes &
197                 ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
198 }
199
200 /* WA Display #0827: Gen9:all */
201 static void
202 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
203 {
204         if (enable)
205                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
206                              0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
207         else
208                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
209                              DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
210 }
211
212 /* Wa_2006604312:icl,ehl */
213 static void
214 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
215                        bool enable)
216 {
217         if (enable)
218                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
219         else
220                 intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
221 }
222
223 /* Wa_1604331009:icl,jsl,ehl */
224 static void
225 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
226                        bool enable)
227 {
228         intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
229                      enable ? CURSOR_GATING_DIS : 0);
230 }
231
232 static bool
233 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
234 {
235         return crtc_state->master_transcoder != INVALID_TRANSCODER;
236 }
237
238 static bool
239 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
240 {
241         return crtc_state->sync_mode_slaves_mask != 0;
242 }
243
244 bool
245 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
246 {
247         return is_trans_port_sync_master(crtc_state) ||
248                 is_trans_port_sync_slave(crtc_state);
249 }
250
251 static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
252 {
253         return ffs(crtc_state->bigjoiner_pipes) - 1;
254 }
255
256 u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
257 {
258         if (crtc_state->bigjoiner_pipes)
259                 return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
260         else
261                 return 0;
262 }
263
264 bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
265 {
266         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
267
268         return crtc_state->bigjoiner_pipes &&
269                 crtc->pipe != bigjoiner_master_pipe(crtc_state);
270 }
271
272 bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
273 {
274         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
275
276         return crtc_state->bigjoiner_pipes &&
277                 crtc->pipe == bigjoiner_master_pipe(crtc_state);
278 }
279
280 static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
281 {
282         return hweight8(crtc_state->bigjoiner_pipes);
283 }
284
285 struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
286 {
287         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
288
289         if (intel_crtc_is_bigjoiner_slave(crtc_state))
290                 return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
291         else
292                 return to_intel_crtc(crtc_state->uapi.crtc);
293 }
294
/*
 * Wait for the pipe described by @old_crtc_state to fully shut down
 * after its enable bit has been cleared.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		/* No pipe state bit before gen4; wait for the scanline counter to stop. */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
312
/*
 * Assert that the given CPU transcoder is in the expected enabled/disabled
 * @state, emitting an I915_STATE_WARN on mismatch.
 */
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power domain is off -> the transcoder cannot be enabled. */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			str_on_off(state), str_on_off(cur_state));
}
340
/* Warn if the plane's actual hw state does not match the expected @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}
353
354 #define assert_plane_enabled(p) assert_plane(p, true)
355 #define assert_plane_disabled(p) assert_plane(p, false)
356
/* Sanity check that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
365
/*
 * Wait for the DPLL/PHY "port ready" status bits of @dig_port to match
 * @expected_mask, warning if the wait times out.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Pick the status register and bitfield for this port. */
	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits live 4 bits above port B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
400
/*
 * Enable the pipe/transcoder for @new_crtc_state.
 *
 * Asserts the expected preconditions (all planes off, and on GMCH
 * platforms the relevant PLL enabled; with a PCH encoder, the FDI PLLs),
 * applies Wa_22012358565 on display ver 13, sets TRANSCONF_ENABLE, and
 * optionally waits for the scanline counter to start moving.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
461
/*
 * Disable the pipe/transcoder for @old_crtc_state and, when the enable
 * bit was actually cleared (i.e. not on 830), wait for the pipe to stop.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* Clear the FEC stall chicken bit; register location moved on ver 14+. */
	if (DISPLAY_VER(dev_priv) >= 14)
		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
	else if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
506
507 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
508 {
509         unsigned int size = 0;
510         int i;
511
512         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
513                 size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
514
515         return size;
516 }
517
518 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
519 {
520         unsigned int size = 0;
521         int i;
522
523         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
524                 unsigned int plane_size;
525
526                 if (rem_info->plane[i].linear)
527                         plane_size = rem_info->plane[i].size;
528                 else
529                         plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
530
531                 if (plane_size == 0)
532                         continue;
533
534                 if (rem_info->plane_alignment)
535                         size = ALIGN(size, rem_info->plane_alignment);
536
537                 size += plane_size;
538         }
539
540         return size;
541 }
542
543 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
544 {
545         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
546         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
547
548         return DISPLAY_VER(dev_priv) < 4 ||
549                 (plane->fbc &&
550                  plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
551 }
552
553 /*
554  * Convert the x/y offsets into a linear offset.
555  * Only valid with 0/180 degree rotation, which is fine since linear
556  * offset is only used with linear buffers on pre-hsw and tiled buffers
557  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
558  */
559 u32 intel_fb_xy_to_linear(int x, int y,
560                           const struct intel_plane_state *state,
561                           int color_plane)
562 {
563         const struct drm_framebuffer *fb = state->hw.fb;
564         unsigned int cpp = fb->format->cpp[color_plane];
565         unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
566
567         return y * pitch + x * cpp;
568 }
569
570 /*
571  * Add the x/y offsets derived from fb->offsets[] to the user
572  * specified plane src x/y offsets. The resulting x/y offsets
573  * specify the start of scanout from the beginning of the gtt mapping.
574  */
575 void intel_add_fb_offsets(int *x, int *y,
576                           const struct intel_plane_state *state,
577                           int color_plane)
578
579 {
580         *x += state->view.color_plane[color_plane].x;
581         *y += state->view.color_plane[color_plane].y;
582 }
583
584 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
585                               u32 pixel_format, u64 modifier)
586 {
587         struct intel_crtc *crtc;
588         struct intel_plane *plane;
589
590         if (!HAS_DISPLAY(dev_priv))
591                 return 0;
592
593         /*
594          * We assume the primary plane for pipe A has
595          * the highest stride limits of them all,
596          * if in case pipe A is disabled, use the first pipe from pipe_mask.
597          */
598         crtc = intel_first_crtc(dev_priv);
599         if (!crtc)
600                 return 0;
601
602         plane = to_intel_plane(crtc->base.primary);
603
604         return plane->max_stride(plane, pixel_format, modifier,
605                                  DRM_MODE_ROTATE_0);
606 }
607
608 void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
609                              struct intel_plane_state *plane_state,
610                              bool visible)
611 {
612         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
613
614         plane_state->uapi.visible = visible;
615
616         if (visible)
617                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
618         else
619                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
620 }
621
622 void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
623 {
624         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
625         struct drm_plane *plane;
626
627         /*
628          * Active_planes aliases if multiple "primary" or cursor planes
629          * have been used on the same (or wrong) pipe. plane_mask uses
630          * unique ids, hence we can use that to reconstruct active_planes.
631          */
632         crtc_state->enabled_planes = 0;
633         crtc_state->active_planes = 0;
634
635         drm_for_each_plane_mask(plane, &dev_priv->drm,
636                                 crtc_state->uapi.plane_mask) {
637                 crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
638                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
639         }
640 }
641
/*
 * Disable @plane on @crtc immediately, outside the regular atomic commit
 * path, and scrub the cached state that depended on it (visibility,
 * plane bitmasks, data rates, min cdclk, IPS).
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	/* The plane no longer contributes any bandwidth/cdclk demand. */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* Turn off IPS once only the cursor (at most) remains active. */
	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}
693
/*
 * Derive the plane's y offset by adjusting the plane's aligned main
 * surface offset down to offset 0 (the x component is discarded).
 * NOTE(review): presumably this yields the y offset relative to the
 * start of the fence mapping — confirm against
 * intel_plane_adjust_aligned_offset().
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
704
705 static int
706 intel_display_commit_duplicated_state(struct intel_atomic_state *state,
707                                       struct drm_modeset_acquire_ctx *ctx)
708 {
709         struct drm_i915_private *i915 = to_i915(state->base.dev);
710         int ret;
711
712         ret = drm_atomic_helper_commit_duplicated_state(&state->base, ctx);
713
714         drm_WARN_ON(&i915->drm, ret == -EDEADLK);
715
716         return ret;
717 }
718
/*
 * Re-read the current hw state and, when @state is provided, recommit
 * that duplicated atomic state with a forced recalculation on every crtc.
 * Returns 0 or a negative error code from the commit.
 */
static int
__intel_display_resume(struct drm_i915_private *i915,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	intel_modeset_setup_hw_state(i915, ctx);
	intel_vga_redisable(i915);

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(i915))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	return intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
}
754
755 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
756 {
757         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
758                 intel_has_gpu_reset(to_gt(dev_priv)));
759 }
760
/*
 * Prepare the display for a GPU reset. When the reset will clobber the
 * display (or force_reset_modeset_test is set), take all modeset locks,
 * duplicate the current atomic state for later restoration and disable
 * all crtcs. Paired with intel_display_finish_reset().
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry lock acquisition until we no longer hit -EDEADLK. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_display_finish_reset(). */
	dev_priv->display.restore.modeset_state = state;
	state->acquire_ctx = ctx;
}
822
/*
 * Restore the display state saved by intel_display_prepare_reset() after
 * the GPU reset has completed, then drop the locks it acquired and clear
 * the I915_RESET_MODESET flag.
 */
void intel_display_finish_reset(struct drm_i915_private *i915)
{
	struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
		return;

	state = fetch_and_zero(&i915->display.restore.modeset_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(i915)) {
		/* for testing only restore the display */
		ret = intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(i915);
		intel_modeset_init_hw(i915);
		intel_init_clock_gating(i915);
		intel_hpd_init(i915);

		ret = __intel_display_resume(i915, state, ctx);
		if (ret)
			drm_err(&i915->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(i915);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&i915->drm.mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
}
873
874 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
875 {
876         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
877         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
878         enum pipe pipe = crtc->pipe;
879         u32 tmp;
880
881         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
882
883         /*
884          * Display WA #1153: icl
885          * enable hardware to bypass the alpha math
886          * and rounding for per-pixel values 00 and 0xff
887          */
888         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
889         /*
890          * Display WA # 1605353570: icl
891          * Set the pixel rounding bit to 1 for allowing
892          * passthrough of Frame buffer pixels unmodified
893          * across pipe
894          */
895         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
896
897         /*
898          * Underrun recovery must always be disabled on display 13+.
899          * DG2 chicken bit meaning is inverted compared to other platforms.
900          */
901         if (IS_DG2(dev_priv))
902                 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
903         else if (DISPLAY_VER(dev_priv) >= 13)
904                 tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
905
906         /* Wa_14010547955:dg2 */
907         if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
908                 tmp |= DG2_RENDER_CCSTAG_4_3_EN;
909
910         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
911 }
912
913 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
914 {
915         struct drm_crtc *crtc;
916         bool cleanup_done;
917
918         drm_for_each_crtc(crtc, &dev_priv->drm) {
919                 struct drm_crtc_commit *commit;
920                 spin_lock(&crtc->commit_lock);
921                 commit = list_first_entry_or_null(&crtc->commit_list,
922                                                   struct drm_crtc_commit, commit_entry);
923                 cleanup_done = commit ?
924                         try_wait_for_completion(&commit->cleanup_done) : true;
925                 spin_unlock(&crtc->commit_lock);
926
927                 if (cleanup_done)
928                         continue;
929
930                 intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
931
932                 return true;
933         }
934
935         return false;
936 }
937
938 /*
939  * Finds the encoder associated with the given CRTC. This can only be
940  * used when we know that the CRTC isn't feeding multiple encoders!
941  */
942 struct intel_encoder *
943 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
944                            const struct intel_crtc_state *crtc_state)
945 {
946         const struct drm_connector_state *connector_state;
947         const struct drm_connector *connector;
948         struct intel_encoder *encoder = NULL;
949         struct intel_crtc *master_crtc;
950         int num_encoders = 0;
951         int i;
952
953         master_crtc = intel_master_crtc(crtc_state);
954
955         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
956                 if (connector_state->crtc != &master_crtc->base)
957                         continue;
958
959                 encoder = to_intel_encoder(connector_state->best_encoder);
960                 num_encoders++;
961         }
962
963         drm_WARN(encoder->base.dev, num_encoders != 1,
964                  "%d encoders for pipe %c\n",
965                  num_encoders, pipe_name(master_crtc->pipe));
966
967         return encoder;
968 }
969
970 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
971 {
972         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
973         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
974         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
975         enum pipe pipe = crtc->pipe;
976         int width = drm_rect_width(dst);
977         int height = drm_rect_height(dst);
978         int x = dst->x1;
979         int y = dst->y1;
980
981         if (!crtc_state->pch_pfit.enabled)
982                 return;
983
984         /* Force use of hard-coded filter coefficients
985          * as some pre-programmed values are broken,
986          * e.g. x201.
987          */
988         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
989                 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
990                                   PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
991         else
992                 intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
993                                   PF_FILTER_MED_3x3);
994         intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
995         intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
996 }
997
998 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
999 {
1000         if (crtc->overlay)
1001                 (void) intel_overlay_switch_off(crtc->overlay);
1002
1003         /* Let userspace switch the overlay on again. In most cases userspace
1004          * has to recompute where to put it anyway.
1005          */
1006 }
1007
1008 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1009 {
1010         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1011
1012         if (!crtc_state->nv12_planes)
1013                 return false;
1014
1015         /* WA Display #0827: Gen9:all */
1016         if (DISPLAY_VER(dev_priv) == 9)
1017                 return true;
1018
1019         return false;
1020 }
1021
1022 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1023 {
1024         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1025
1026         /* Wa_2006604312:icl,ehl */
1027         if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
1028                 return true;
1029
1030         return false;
1031 }
1032
1033 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1034 {
1035         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1036
1037         /* Wa_1604331009:icl,jsl,ehl */
1038         if (is_hdr_mode(crtc_state) &&
1039             crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1040             DISPLAY_VER(dev_priv) == 11)
1041                 return true;
1042
1043         return false;
1044 }
1045
1046 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
1047                                     enum pipe pipe, bool enable)
1048 {
1049         if (DISPLAY_VER(i915) == 9) {
1050                 /*
1051                  * "Plane N strech max must be programmed to 11b (x1)
1052                  *  when Async flips are enabled on that plane."
1053                  */
1054                 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1055                              SKL_PLANE1_STRETCH_MAX_MASK,
1056                              enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1057         } else {
1058                 /* Also needed on HSW/BDW albeit undocumented */
1059                 intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1060                              HSW_PRI_STRETCH_MAX_MASK,
1061                              enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
1062         }
1063 }
1064
1065 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1066 {
1067         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1068
1069         return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
1070                 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1071 }
1072
1073 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1074                             const struct intel_crtc_state *new_crtc_state)
1075 {
1076         return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1077                 new_crtc_state->active_planes;
1078 }
1079
1080 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1081                              const struct intel_crtc_state *new_crtc_state)
1082 {
1083         return old_crtc_state->active_planes &&
1084                 (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
1085 }
1086
/*
 * Post-plane-update bookkeeping for a CRTC: flush frontbuffer tracking,
 * update watermarks/FBC, and drop the workarounds that were needed for the
 * old plane configuration but not for the new one. Mirrors (in reverse)
 * the enables done in intel_pre_plane_update().
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	/* Async flip VT-d stretch-max workaround no longer needed */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	/* Display WA 827 no longer needed */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl no longer needed */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	/* Wa_1604331009:icl,jsl,ehl no longer needed */
	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);
}
1120
1121 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1122                                         struct intel_crtc *crtc)
1123 {
1124         const struct intel_crtc_state *crtc_state =
1125                 intel_atomic_get_new_crtc_state(state, crtc);
1126         u8 update_planes = crtc_state->update_planes;
1127         const struct intel_plane_state *plane_state;
1128         struct intel_plane *plane;
1129         int i;
1130
1131         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1132                 if (plane->pipe == crtc->pipe &&
1133                     update_planes & BIT(plane->id))
1134                         plane->enable_flip_done(plane);
1135         }
1136 }
1137
1138 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1139                                          struct intel_crtc *crtc)
1140 {
1141         const struct intel_crtc_state *crtc_state =
1142                 intel_atomic_get_new_crtc_state(state, crtc);
1143         u8 update_planes = crtc_state->update_planes;
1144         const struct intel_plane_state *plane_state;
1145         struct intel_plane *plane;
1146         int i;
1147
1148         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1149                 if (plane->pipe == crtc->pipe &&
1150                     update_planes & BIT(plane->id))
1151                         plane->disable_flip_done(plane);
1152         }
1153 }
1154
1155 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1156                                              struct intel_crtc *crtc)
1157 {
1158         const struct intel_crtc_state *old_crtc_state =
1159                 intel_atomic_get_old_crtc_state(state, crtc);
1160         const struct intel_crtc_state *new_crtc_state =
1161                 intel_atomic_get_new_crtc_state(state, crtc);
1162         u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
1163                                        ~new_crtc_state->async_flip_planes;
1164         const struct intel_plane_state *old_plane_state;
1165         struct intel_plane *plane;
1166         bool need_vbl_wait = false;
1167         int i;
1168
1169         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1170                 if (plane->need_async_flip_disable_wa &&
1171                     plane->pipe == crtc->pipe &&
1172                     disable_async_flip_planes & BIT(plane->id)) {
1173                         /*
1174                          * Apart from the async flip bit we want to
1175                          * preserve the old state for the plane.
1176                          */
1177                         plane->async_flip(plane, old_crtc_state,
1178                                           old_plane_state, false);
1179                         need_vbl_wait = true;
1180                 }
1181         }
1182
1183         if (need_vbl_wait)
1184                 intel_crtc_wait_for_next_vblank(crtc);
1185 }
1186
/*
 * Pre-plane-update work for a CRTC: deactivate features that must not be
 * live across the plane update (DRRS, PSR), enable the workarounds the new
 * plane configuration needs, and do the wait-for-vblank dances required by
 * various platforms before planes can be safely reprogrammed. The ordering
 * of the steps below is significant — see the individual comments.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	/* IPS and FBC pre-update hooks may require a vblank to settle. */
	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	/* Async flip VT-d stretch-max workaround becoming necessary */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
1292
1293 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1294                                       struct intel_crtc *crtc)
1295 {
1296         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1297         const struct intel_crtc_state *new_crtc_state =
1298                 intel_atomic_get_new_crtc_state(state, crtc);
1299         unsigned int update_mask = new_crtc_state->update_planes;
1300         const struct intel_plane_state *old_plane_state;
1301         struct intel_plane *plane;
1302         unsigned fb_bits = 0;
1303         int i;
1304
1305         intel_crtc_dpms_overlay_disable(crtc);
1306
1307         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1308                 if (crtc->pipe != plane->pipe ||
1309                     !(update_mask & BIT(plane->id)))
1310                         continue;
1311
1312                 intel_plane_disable_arm(plane, new_crtc_state);
1313
1314                 if (old_plane_state->uapi.visible)
1315                         fb_bits |= plane->frontbuffer_bit;
1316         }
1317
1318         intel_frontbuffer_flip(dev_priv, fb_bits);
1319 }
1320
1321 /*
1322  * intel_connector_primary_encoder - get the primary encoder for a connector
1323  * @connector: connector for which to return the encoder
1324  *
1325  * Returns the primary encoder for a connector. There is a 1:1 mapping from
1326  * all connectors to their encoder, except for DP-MST connectors which have
1327  * both a virtual and a primary encoder. These DP-MST primary encoders can be
1328  * pointed to by as many DP-MST connectors as there are pipes.
1329  */
1330 static struct intel_encoder *
1331 intel_connector_primary_encoder(struct intel_connector *connector)
1332 {
1333         struct intel_encoder *encoder;
1334
1335         if (connector->mst_port)
1336                 return &dp_to_dig_port(connector->mst_port)->base;
1337
1338         encoder = intel_attached_encoder(connector);
1339         drm_WARN_ON(connector->base.dev, !encoder);
1340
1341         return encoder;
1342 }
1343
1344 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
1345 {
1346         struct drm_i915_private *i915 = to_i915(state->base.dev);
1347         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1348         struct intel_crtc *crtc;
1349         struct drm_connector_state *new_conn_state;
1350         struct drm_connector *connector;
1351         int i;
1352
1353         /*
1354          * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
1355          * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
1356          */
1357         if (i915->display.dpll.mgr) {
1358                 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1359                         if (intel_crtc_needs_modeset(new_crtc_state))
1360                                 continue;
1361
1362                         new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
1363                         new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
1364                 }
1365         }
1366
1367         if (!state->modeset)
1368                 return;
1369
1370         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1371                                         i) {
1372                 struct intel_connector *intel_connector;
1373                 struct intel_encoder *encoder;
1374                 struct intel_crtc *crtc;
1375
1376                 if (!intel_connector_needs_modeset(state, connector))
1377                         continue;
1378
1379                 intel_connector = to_intel_connector(connector);
1380                 encoder = intel_connector_primary_encoder(intel_connector);
1381                 if (!encoder->update_prepare)
1382                         continue;
1383
1384                 crtc = new_conn_state->crtc ?
1385                         to_intel_crtc(new_conn_state->crtc) : NULL;
1386                 encoder->update_prepare(state, encoder, crtc);
1387         }
1388 }
1389
1390 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1391 {
1392         struct drm_connector_state *new_conn_state;
1393         struct drm_connector *connector;
1394         int i;
1395
1396         if (!state->modeset)
1397                 return;
1398
1399         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1400                                         i) {
1401                 struct intel_connector *intel_connector;
1402                 struct intel_encoder *encoder;
1403                 struct intel_crtc *crtc;
1404
1405                 if (!intel_connector_needs_modeset(state, connector))
1406                         continue;
1407
1408                 intel_connector = to_intel_connector(connector);
1409                 encoder = intel_connector_primary_encoder(intel_connector);
1410                 if (!encoder->update_complete)
1411                         continue;
1412
1413                 crtc = new_conn_state->crtc ?
1414                         to_intel_crtc(new_conn_state->crtc) : NULL;
1415                 encoder->update_complete(state, encoder, crtc);
1416         }
1417 }
1418
1419 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1420                                           struct intel_crtc *crtc)
1421 {
1422         const struct intel_crtc_state *crtc_state =
1423                 intel_atomic_get_new_crtc_state(state, crtc);
1424         const struct drm_connector_state *conn_state;
1425         struct drm_connector *conn;
1426         int i;
1427
1428         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1429                 struct intel_encoder *encoder =
1430                         to_intel_encoder(conn_state->best_encoder);
1431
1432                 if (conn_state->crtc != &crtc->base)
1433                         continue;
1434
1435                 if (encoder->pre_pll_enable)
1436                         encoder->pre_pll_enable(state, encoder,
1437                                                 crtc_state, conn_state);
1438         }
1439 }
1440
1441 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1442                                       struct intel_crtc *crtc)
1443 {
1444         const struct intel_crtc_state *crtc_state =
1445                 intel_atomic_get_new_crtc_state(state, crtc);
1446         const struct drm_connector_state *conn_state;
1447         struct drm_connector *conn;
1448         int i;
1449
1450         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1451                 struct intel_encoder *encoder =
1452                         to_intel_encoder(conn_state->best_encoder);
1453
1454                 if (conn_state->crtc != &crtc->base)
1455                         continue;
1456
1457                 if (encoder->pre_enable)
1458                         encoder->pre_enable(state, encoder,
1459                                             crtc_state, conn_state);
1460         }
1461 }
1462
1463 static void intel_encoders_enable(struct intel_atomic_state *state,
1464                                   struct intel_crtc *crtc)
1465 {
1466         const struct intel_crtc_state *crtc_state =
1467                 intel_atomic_get_new_crtc_state(state, crtc);
1468         const struct drm_connector_state *conn_state;
1469         struct drm_connector *conn;
1470         int i;
1471
1472         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1473                 struct intel_encoder *encoder =
1474                         to_intel_encoder(conn_state->best_encoder);
1475
1476                 if (conn_state->crtc != &crtc->base)
1477                         continue;
1478
1479                 if (encoder->enable)
1480                         encoder->enable(state, encoder,
1481                                         crtc_state, conn_state);
1482                 intel_opregion_notify_encoder(encoder, true);
1483         }
1484 }
1485
1486 static void intel_encoders_disable(struct intel_atomic_state *state,
1487                                    struct intel_crtc *crtc)
1488 {
1489         const struct intel_crtc_state *old_crtc_state =
1490                 intel_atomic_get_old_crtc_state(state, crtc);
1491         const struct drm_connector_state *old_conn_state;
1492         struct drm_connector *conn;
1493         int i;
1494
1495         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1496                 struct intel_encoder *encoder =
1497                         to_intel_encoder(old_conn_state->best_encoder);
1498
1499                 if (old_conn_state->crtc != &crtc->base)
1500                         continue;
1501
1502                 intel_opregion_notify_encoder(encoder, false);
1503                 if (encoder->disable)
1504                         encoder->disable(state, encoder,
1505                                          old_crtc_state, old_conn_state);
1506         }
1507 }
1508
1509 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1510                                         struct intel_crtc *crtc)
1511 {
1512         const struct intel_crtc_state *old_crtc_state =
1513                 intel_atomic_get_old_crtc_state(state, crtc);
1514         const struct drm_connector_state *old_conn_state;
1515         struct drm_connector *conn;
1516         int i;
1517
1518         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1519                 struct intel_encoder *encoder =
1520                         to_intel_encoder(old_conn_state->best_encoder);
1521
1522                 if (old_conn_state->crtc != &crtc->base)
1523                         continue;
1524
1525                 if (encoder->post_disable)
1526                         encoder->post_disable(state, encoder,
1527                                               old_crtc_state, old_conn_state);
1528         }
1529 }
1530
1531 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1532                                             struct intel_crtc *crtc)
1533 {
1534         const struct intel_crtc_state *old_crtc_state =
1535                 intel_atomic_get_old_crtc_state(state, crtc);
1536         const struct drm_connector_state *old_conn_state;
1537         struct drm_connector *conn;
1538         int i;
1539
1540         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1541                 struct intel_encoder *encoder =
1542                         to_intel_encoder(old_conn_state->best_encoder);
1543
1544                 if (old_conn_state->crtc != &crtc->base)
1545                         continue;
1546
1547                 if (encoder->post_pll_disable)
1548                         encoder->post_pll_disable(state, encoder,
1549                                                   old_crtc_state, old_conn_state);
1550         }
1551 }
1552
1553 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1554                                        struct intel_crtc *crtc)
1555 {
1556         const struct intel_crtc_state *crtc_state =
1557                 intel_atomic_get_new_crtc_state(state, crtc);
1558         const struct drm_connector_state *conn_state;
1559         struct drm_connector *conn;
1560         int i;
1561
1562         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1563                 struct intel_encoder *encoder =
1564                         to_intel_encoder(conn_state->best_encoder);
1565
1566                 if (conn_state->crtc != &crtc->base)
1567                         continue;
1568
1569                 if (encoder->update_pipe)
1570                         encoder->update_pipe(state, encoder,
1571                                              crtc_state, conn_state);
1572         }
1573 }
1574
1575 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1576 {
1577         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1578         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1579
1580         plane->disable_arm(plane, crtc_state);
1581 }
1582
/*
 * Program the CPU transcoder for an ILK-style (PCH based) pipe:
 * M/N link values, transcoder timings and PIPECONF.
 */
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		/* PCH encoders get the FDI link M/N values */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* CPU DP encoders get the DP link M/N (and alternate M2/N2) */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}
1602
/*
 * Full modeset enable sequence for ILK-style (PCH based, pre-HSW) pipes.
 * The ordering of the steps below (transcoder config, encoder pre-enable,
 * pfit, LUT load, transcoder enable, PCH enable, encoder enable) is
 * significant and must not be rearranged.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		/* without a PCH encoder the FDI link must stay off */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	/* re-arm underrun reporting suppressed at the top of this function */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1680
1681 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1682                                             enum pipe pipe, bool apply)
1683 {
1684         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1685         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1686
1687         if (apply)
1688                 val |= mask;
1689         else
1690                 val &= ~mask;
1691
1692         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1693 }
1694
/* Program the line time watermark register (WM_LINETIME) for the pipe. */
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
		       HSW_LINETIME(crtc_state->linetime) |
		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
}
1704
/*
 * Program the frame start delay into the transcoder CHICKEN register.
 * MTL (display ver >= 14) moved the register, hence the two variants.
 * Hardware encodes the delay as (framestart_delay - 1).
 */
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder transcoder = crtc_state->cpu_transcoder;
	i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
			 CHICKEN_TRANS(transcoder);

	intel_de_rmw(dev_priv, reg,
		     HSW_FRAME_START_DELAY_MASK,
		     HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
1717
/*
 * Pre-enable steps for a bigjoiner pipe. When called for the slave
 * pipe, the master crtc's encoder pre-pll/pre-enable hooks are run
 * here so they are sequenced around the shared DPLL enable.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}
1735
/*
 * Program the CPU transcoder for HSW+ pipes: M/N link values,
 * transcoder timings, pixel multiplier, frame start delay and
 * TRANSCONF.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		/* PCH encoders use the FDI link M/N values */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* DP encoders use the DP link M/N (and alternate M2/N2) */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	/* TRANS_MULT does not exist for the EDP transcoder */
	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}
1762
/*
 * Full modeset enable sequence for HSW+ pipes (including bigjoiner
 * configurations). The step ordering is significant and must not be
 * rearranged.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_dmc_enable_pipe(dev_priv, crtc->pipe);

	if (!new_crtc_state->bigjoiner_pipes) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		/* bigjoiner needs master/slave-aware encoder sequencing */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_dsc_enable(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* slave pipes and DSI transcoders don't program the CPU transcoder here */
	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
	    !transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		/* WA #1180: gating may be re-enabled after the next vblank */
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}
1855
/* Disable the ILK-style panel fitter (PF_CTL) for the pipe, if it was in use. */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
}
1871
/*
 * Full modeset disable sequence for ILK-style (PCH based) pipes,
 * mirroring ilk_crtc_enable() in reverse order.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	/* re-arm underrun reporting suppressed at the top of this function */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1907
/*
 * Disable sequence for HSW+ pipes. Most of the actual teardown happens
 * in the encoder disable/post_disable hooks; bigjoiner slaves skip them
 * here since their encoders are handled via the master crtc.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
		intel_encoders_disable(state, crtc);
		intel_encoders_post_disable(state, crtc);
	}

	intel_dmc_disable_pipe(i915, crtc->pipe);
}
1926
/* Enable the GMCH panel fitter (PFIT) for the pipe, if configured. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* a zero control value means no panel fitting was requested */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
1951
1952 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
1953 {
1954         if (phy == PHY_NONE)
1955                 return false;
1956         else if (IS_ALDERLAKE_S(dev_priv))
1957                 return phy <= PHY_E;
1958         else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
1959                 return phy <= PHY_D;
1960         else if (IS_JSL_EHL(dev_priv))
1961                 return phy <= PHY_C;
1962         else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
1963                 return phy <= PHY_B;
1964         else
1965                 /*
1966                  * DG2 outputs labelled as "combo PHY" in the bspec use
1967                  * SNPS PHYs with completely different programming,
1968                  * hence we always return false here.
1969                  */
1970                 return false;
1971 }
1972
1973 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
1974 {
1975         if (IS_DG2(dev_priv))
1976                 /* DG2's "TC1" output uses a SNPS PHY */
1977                 return false;
1978         else if (IS_ALDERLAKE_P(dev_priv))
1979                 return phy >= PHY_F && phy <= PHY_I;
1980         else if (IS_TIGERLAKE(dev_priv))
1981                 return phy >= PHY_D && phy <= PHY_I;
1982         else if (IS_ICELAKE(dev_priv))
1983                 return phy >= PHY_C && phy <= PHY_F;
1984         else
1985                 return false;
1986 }
1987
1988 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
1989 {
1990         if (phy == PHY_NONE)
1991                 return false;
1992         else if (IS_DG2(dev_priv))
1993                 /*
1994                  * All four "combo" ports and the TC1 port (PHY E) use
1995                  * Synopsis PHYs.
1996                  */
1997                 return phy <= PHY_E;
1998
1999         return false;
2000 }
2001
2002 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
2003 {
2004         if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
2005                 return PHY_D + port - PORT_D_XELPD;
2006         else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
2007                 return PHY_F + port - PORT_TC1;
2008         else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
2009                 return PHY_B + port - PORT_TC1;
2010         else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
2011                 return PHY_C + port - PORT_TC1;
2012         else if (IS_JSL_EHL(i915) && port == PORT_D)
2013                 return PHY_A;
2014
2015         return PHY_A + port - PORT_A;
2016 }
2017
2018 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2019 {
2020         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2021                 return TC_PORT_NONE;
2022
2023         if (DISPLAY_VER(dev_priv) >= 12)
2024                 return TC_PORT_1 + port - PORT_TC1;
2025         else
2026                 return TC_PORT_1 + port - PORT_C;
2027 }
2028
2029 enum intel_display_power_domain
2030 intel_aux_power_domain(struct intel_digital_port *dig_port)
2031 {
2032         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2033
2034         if (intel_tc_port_in_tbt_alt_mode(dig_port))
2035                 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
2036
2037         return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
2038 }
2039
/*
 * Compute the set of display power domains needed by @crtc_state
 * into @mask: pipe, transcoder, panel fitter, attached encoders,
 * audio, shared DPLL and DSC, as applicable. An inactive crtc
 * needs no domains.
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	if (!crtc_state->hw.active)
		return;

	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	/* each attached encoder contributes its own power domain */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->shared_dpll)
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
2076
/*
 * Acquire references for the power domains newly required by
 * @crtc_state. Domains no longer needed are returned to the caller
 * via @old_domains so they can be released later (after the commit)
 * with intel_modeset_put_crtc_power_domains().
 */
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
					  struct intel_power_domain_mask *old_domains)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	struct intel_power_domain_mask domains, new_domains;

	get_crtc_power_domains(crtc_state, &domains);

	/* new_domains = wanted but not yet held */
	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	/* old_domains = held but no longer wanted */
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);
}
2101
/*
 * Release the power domain references in @domains, as previously
 * returned by intel_modeset_get_crtc_power_domains().
 */
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					  struct intel_power_domain_mask *domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
					    domains);
}
2109
/*
 * Program the CPU transcoder for GMCH-style (i9xx/VLV/CHV) pipes:
 * DP M/N link values, transcoder timings and PIPECONF.
 */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	i9xx_set_pipeconf(crtc_state);
}
2126
/*
 * Full modeset enable sequence for VLV/CHV pipes. Step ordering is
 * significant (PLL enable is sequenced between the encoder pre_pll
 * and pre_enable hooks).
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: disable the per-pipe blender/background canvas */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
2175
/*
 * Full modeset enable sequence for pre-VLV GMCH pipes. Step ordering
 * is significant.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	/* gen2 has no underrun reporting */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* fall back to a full watermark update if no per-crtc hook exists */
	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}
2220
/* Disable the GMCH panel fitter (PFIT), if it was in use. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	/* pfit may only be touched while the transcoder is off */
	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
2235
2236 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2237                               struct intel_crtc *crtc)
2238 {
2239         struct intel_crtc_state *old_crtc_state =
2240                 intel_atomic_get_old_crtc_state(state, crtc);
2241         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2242         enum pipe pipe = crtc->pipe;
2243
2244         /*
2245          * On gen2 planes are double buffered but the pipe isn't, so we must
2246          * wait for planes to fully turn off before disabling the pipe.
2247          */
2248         if (DISPLAY_VER(dev_priv) == 2)
2249                 intel_crtc_wait_for_next_vblank(crtc);
2250
2251         intel_encoders_disable(state, crtc);
2252
2253         intel_crtc_vblank_off(old_crtc_state);
2254
2255         intel_disable_transcoder(old_crtc_state);
2256
2257         i9xx_pfit_disable(old_crtc_state);
2258
2259         intel_encoders_post_disable(state, crtc);
2260
2261         if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2262                 if (IS_CHERRYVIEW(dev_priv))
2263                         chv_disable_pll(dev_priv, pipe);
2264                 else if (IS_VALLEYVIEW(dev_priv))
2265                         vlv_disable_pll(dev_priv, pipe);
2266                 else
2267                         i9xx_disable_pll(old_crtc_state);
2268         }
2269
2270         intel_encoders_post_pll_disable(state, crtc);
2271
2272         if (DISPLAY_VER(dev_priv) != 2)
2273                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2274
2275         if (!dev_priv->display.funcs.wm->initial_watermarks)
2276                 intel_update_watermarks(dev_priv);
2277
2278         /* clock the pipe down to 640x480@60 to potentially save power */
2279         if (IS_I830(dev_priv))
2280                 i830_enable_pipe(dev_priv, pipe);
2281 }
2282
2283
2284 /*
2285  * turn all crtc's off, but do not adjust state
2286  * This has to be paired with a call to intel_modeset_setup_hw_state.
2287  */
/*
 * Suspend all crtcs via the atomic helper and stash the resulting
 * state for later restoration. Returns 0 on success or a negative
 * error code if suspending failed.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
	else
		/* kept so intel_modeset_setup_hw_state() can restore it */
		dev_priv->display.restore.modeset_state = state;
	return ret;
}
2306
/* Generic encoder destroy hook: clean up the DRM encoder and free it. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
2314
2315 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2316 {
2317         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2318
2319         /* GDG double wide on either pipe, otherwise pipe A only */
2320         return DISPLAY_VER(dev_priv) < 4 &&
2321                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2322 }
2323
/*
 * Return the effective pipe pixel rate, adjusted upwards when the
 * panel fitter downscales (the pipe then processes more pixels per
 * output pixel than the plain pipe clock implies).
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	/* source size in 16.16 fixed point, as intel_adjusted_rate() expects */
	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}
2344
/*
 * Populate @mode's nominal timing fields from the crtc_* (hardware)
 * timings of @timings, and give the mode a name.
 */
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
					 const struct drm_display_mode *timings)
{
	mode->hdisplay = timings->crtc_hdisplay;
	mode->htotal = timings->crtc_htotal;
	mode->hsync_start = timings->crtc_hsync_start;
	mode->hsync_end = timings->crtc_hsync_end;

	mode->vdisplay = timings->crtc_vdisplay;
	mode->vtotal = timings->crtc_vtotal;
	mode->vsync_start = timings->crtc_vsync_start;
	mode->vsync_end = timings->crtc_vsync_end;

	mode->flags = timings->flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = timings->crtc_clock;

	drm_mode_set_name(mode);
}
2365
2366 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2367 {
2368         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2369
2370         if (HAS_GMCH(dev_priv))
2371                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2372                 crtc_state->pixel_rate =
2373                         crtc_state->hw.pipe_mode.crtc_clock;
2374         else
2375                 crtc_state->pixel_rate =
2376                         ilk_pipe_pixel_rate(crtc_state);
2377 }
2378
2379 static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2380                                            struct drm_display_mode *mode)
2381 {
2382         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2383
2384         if (num_pipes < 2)
2385                 return;
2386
2387         mode->crtc_clock /= num_pipes;
2388         mode->crtc_hdisplay /= num_pipes;
2389         mode->crtc_hblank_start /= num_pipes;
2390         mode->crtc_hblank_end /= num_pipes;
2391         mode->crtc_hsync_start /= num_pipes;
2392         mode->crtc_hsync_end /= num_pipes;
2393         mode->crtc_htotal /= num_pipes;
2394 }
2395
2396 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2397                                           struct drm_display_mode *mode)
2398 {
2399         int overlap = crtc_state->splitter.pixel_overlap;
2400         int n = crtc_state->splitter.link_count;
2401
2402         if (!crtc_state->splitter.enable)
2403                 return;
2404
2405         /*
2406          * eDP MSO uses segment timings from EDID for transcoder
2407          * timings, but full mode for everything else.
2408          *
2409          * h_full = (h_segment - pixel_overlap) * link_count
2410          */
2411         mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2412         mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2413         mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2414         mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2415         mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2416         mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2417         mode->crtc_clock *= n;
2418 }
2419
/*
 * Derive hw.mode, hw.pipe_mode, hw.adjusted_mode normal timings and the
 * pixel rate from the transcoder crtc timings stored in hw.adjusted_mode
 * by hw readout. Note the ordering: MSO segment expansion happens before
 * the bigjoiner per-pipe division.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	/* Full source size across all joined pipes (?: 1 when no bigjoiner) */
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		(intel_bigjoiner_num_pipes(crtc_state) ?: 1);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}
2455
/*
 * Read the encoder's current hw state into @crtc_state, then derive
 * the remaining software mode/pixel-rate state from that readout.
 */
void intel_encoder_get_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
2463
2464 static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2465 {
2466         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2467         int width, height;
2468
2469         if (num_pipes < 2)
2470                 return;
2471
2472         width = drm_rect_width(&crtc_state->pipe_src);
2473         height = drm_rect_height(&crtc_state->pipe_src);
2474
2475         drm_rect_init(&crtc_state->pipe_src, 0, 0,
2476                       width / num_pipes, height);
2477 }
2478
2479 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2480 {
2481         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2482         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2483
2484         intel_bigjoiner_compute_pipe_src(crtc_state);
2485
2486         /*
2487          * Pipe horizontal size must be even in:
2488          * - DVO ganged mode
2489          * - LVDS dual channel mode
2490          * - Double wide pipe
2491          */
2492         if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2493                 if (crtc_state->double_wide) {
2494                         drm_dbg_kms(&i915->drm,
2495                                     "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2496                                     crtc->base.base.id, crtc->base.name);
2497                         return -EINVAL;
2498                 }
2499
2500                 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2501                     intel_is_dual_link_lvds(i915)) {
2502                         drm_dbg_kms(&i915->drm,
2503                                     "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2504                                     crtc->base.base.id, crtc->base.name);
2505                         return -EINVAL;
2506                 }
2507         }
2508
2509         return 0;
2510 }
2511
/*
 * Derive hw.pipe_mode from hw.adjusted_mode (expanding MSO segment
 * timings, then dividing per bigjoiner pipe), decide whether double
 * wide mode is needed on old platforms, and validate the resulting
 * pipe dot clock against the platform limit.
 * Returns 0 on success, -EINVAL if the clock is too high.
 */
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int clock_limit = i915->max_dotclk_freq;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(i915) < 4) {
		/* Single wide is limited to 90% of cdclk on these platforms */
		clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			/* Double wide restores the full dotclock limit */
			clock_limit = i915->max_dotclk_freq;
			crtc_state->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    crtc->base.base.id, crtc->base.name,
			    pipe_mode->crtc_clock, clock_limit,
			    str_yes_no(crtc_state->double_wide));
		return -EINVAL;
	}

	return 0;
}
2558
/*
 * Compute the CRTC-level configuration for an atomic commit:
 * PLL/clock state, pipe source size, pipe mode validation, pixel
 * rate, and (for PCH encoders) the FDI link configuration.
 * Each step may fail; a negative errno is propagated to the caller.
 */
static int intel_crtc_compute_config(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_src(crtc_state);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_mode(crtc_state);
	if (ret)
		return ret;

	intel_crtc_compute_pixel_rate(crtc_state);

	if (crtc_state->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, crtc_state);

	return 0;
}
2585
2586 static void
2587 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2588 {
2589         while (*num > DATA_LINK_M_N_MASK ||
2590                *den > DATA_LINK_M_N_MASK) {
2591                 *num >>= 1;
2592                 *den >>= 1;
2593         }
2594 }
2595
2596 static void compute_m_n(u32 *ret_m, u32 *ret_n,
2597                         u32 m, u32 n, u32 constant_n)
2598 {
2599         if (constant_n)
2600                 *ret_n = constant_n;
2601         else
2602                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2603
2604         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2605         intel_reduce_m_n_ratio(ret_m, ret_n);
2606 }
2607
/*
 * Compute the data and link M/N values for a DP/FDI link.
 * data M/N relates the data rate (bits_per_pixel * pixel_clock,
 * adjusted via intel_dp_mode_to_fec_clock() when FEC is enabled) to
 * the total link bandwidth; link M/N relates pixel clock to link
 * clock. Results are stored in @m_n.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	/*
	 * Windows/BIOS uses fixed M/N values always. Follow suit.
	 *
	 * Also several DP dongles in particular seem to be fussy
	 * about too large link M/N values. Presumably the 20bit
	 * value used by Windows/BIOS is acceptable to everyone.
	 */
	m_n->tu = 64;
	compute_m_n(&m_n->data_m, &m_n->data_n,
		    data_clock, link_clock * nlanes * 8,
		    0x8000000);

	compute_m_n(&m_n->link_m, &m_n->link_n,
		    pixel_clock, link_clock,
		    0x80000);
}
2635
2636 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2637 {
2638         /*
2639          * There may be no VBT; and if the BIOS enabled SSC we can
2640          * just keep using it to avoid unnecessary flicker.  Whereas if the
2641          * BIOS isn't using it, don't assume it will work even if the VBT
2642          * indicates as much.
2643          */
2644         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2645                 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2646                                                        PCH_DREF_CONTROL) &
2647                         DREF_SSC1_ENABLE;
2648
2649                 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2650                         drm_dbg_kms(&dev_priv->drm,
2651                                     "SSC %s by BIOS, overriding VBT which says %s\n",
2652                                     str_enabled_disabled(bios_lvds_use_ssc),
2653                                     str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2654                         dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2655                 }
2656         }
2657 }
2658
/*
 * Set @m_n to the state matching all-zero M/N registers.
 * NOTE(review): tu = 1 presumably encodes as 0 via TU_SIZE() in
 * intel_set_m_n() — confirm against the register definition.
 */
void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}
2665
/*
 * Program one set of data/link M/N registers from @m_n.
 * The write order is significant — see the LINK_N comment below.
 */
void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}
2680
2681 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2682                                     enum transcoder transcoder)
2683 {
2684         if (IS_HASWELL(dev_priv))
2685                 return transcoder == TRANSCODER_EDP;
2686
2687         return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2688 }
2689
2690 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2691                                     enum transcoder transcoder,
2692                                     const struct intel_link_m_n *m_n)
2693 {
2694         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2695         enum pipe pipe = crtc->pipe;
2696
2697         if (DISPLAY_VER(dev_priv) >= 5)
2698                 intel_set_m_n(dev_priv, m_n,
2699                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
2700                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
2701         else
2702                 intel_set_m_n(dev_priv, m_n,
2703                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2704                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2705 }
2706
2707 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2708                                     enum transcoder transcoder,
2709                                     const struct intel_link_m_n *m_n)
2710 {
2711         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2712
2713         if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2714                 return;
2715
2716         intel_set_m_n(dev_priv, m_n,
2717                       PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
2718                       PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
2719 }
2720
/*
 * Program the transcoder horizontal/vertical timing registers from
 * hw.adjusted_mode. Interlace and ADL+ VBLANK_START adjustments are
 * applied to local copies of the vertical values so adjusted_mode
 * itself is never modified.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a fixed vsync shift, others derive it from hsync */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* keep the shift within [0, htotal) */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/*
	 * VBLANK_START no longer works on ADL+, instead we must use
	 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
	 */
	if (DISPLAY_VER(dev_priv) >= 13) {
		intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
			       crtc_vblank_start - crtc_vdisplay);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	}

	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* Horizontal timings: registers store value - 1 */
	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
		       HTOTAL(adjusted_mode->crtc_htotal - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
		       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
		       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

	/* Vertical timings, using the locally adjusted values from above */
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
		       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
			       VACTIVE(crtc_vdisplay - 1) |
			       VTOTAL(crtc_vtotal - 1));
}
2801
2802 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2803 {
2804         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2805         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2806         int width = drm_rect_width(&crtc_state->pipe_src);
2807         int height = drm_rect_height(&crtc_state->pipe_src);
2808         enum pipe pipe = crtc->pipe;
2809
2810         /* pipesrc controls the size that is scaled from, which should
2811          * always be the user's requested size.
2812          */
2813         intel_de_write(dev_priv, PIPESRC(pipe),
2814                        PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2815 }
2816
2817 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2818 {
2819         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2820         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2821
2822         if (DISPLAY_VER(dev_priv) == 2)
2823                 return false;
2824
2825         if (DISPLAY_VER(dev_priv) >= 9 ||
2826             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2827                 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
2828         else
2829                 return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
2830 }
2831
/*
 * Read the transcoder h/v timing registers back into
 * pipe_config->hw.adjusted_mode crtc timings. Register fields store
 * value - 1, hence the + 1 on every field. DSI transcoders lack the
 * blanking registers.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u32 tmp;

	tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
	adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
		adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
	}

	tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
	adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

	tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
	adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

	/* FIXME TGL+ DSI transcoders have this! */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
		adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
	}
	tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
	adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

	/* Undo the "chip adds 2 halflines" adjustment from programming time */
	if (intel_pipe_is_interlaced(pipe_config)) {
		adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
		adjusted_mode->crtc_vtotal += 1;
		adjusted_mode->crtc_vblank_end += 1;
	}

	/* ADL+ store vblank start via TRANS_SET_CONTEXT_LATENCY instead */
	if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vdisplay +
			intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
2880
2881 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2882 {
2883         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2884         int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
2885         enum pipe master_pipe, pipe = crtc->pipe;
2886         int width;
2887
2888         if (num_pipes < 2)
2889                 return;
2890
2891         master_pipe = bigjoiner_master_pipe(crtc_state);
2892         width = drm_rect_width(&crtc_state->pipe_src);
2893
2894         drm_rect_translate_to(&crtc_state->pipe_src,
2895                               (pipe - master_pipe) * width, 0);
2896 }
2897
2898 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
2899                                     struct intel_crtc_state *pipe_config)
2900 {
2901         struct drm_device *dev = crtc->base.dev;
2902         struct drm_i915_private *dev_priv = to_i915(dev);
2903         u32 tmp;
2904
2905         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
2906
2907         drm_rect_init(&pipe_config->pipe_src, 0, 0,
2908                       REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
2909                       REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
2910
2911         intel_bigjoiner_adjust_pipe_src(pipe_config);
2912 }
2913
/*
 * Program TRANSCONF for gmch-era (i9xx/g4x/vlv/chv) pipes: enable,
 * double wide, bpc/dither, interlace mode, color range, gamma mode
 * and frame start delay.
 */
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - We keep both pipes enabled on 830
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (crtc_state->double_wide)
		val |= TRANSCONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			val |= TRANSCONF_DITHER_EN |
				TRANSCONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			MISSING_CASE(crtc_state->pipe_bpp);
			fallthrough;
		case 18:
			val |= TRANSCONF_BPC_6;
			break;
		case 24:
			val |= TRANSCONF_BPC_8;
			break;
		case 30:
			val |= TRANSCONF_BPC_10;
			break;
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* old platforms and SDVO need the field indication variant */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
		else
			val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		val |= TRANSCONF_INTERLACE_PROGRESSIVE;
	}

	/* limited range output is only selectable on vlv/chv here */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* register field stores delay - 1 */
	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
2978
2979 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
2980 {
2981         if (IS_I830(dev_priv))
2982                 return false;
2983
2984         return DISPLAY_VER(dev_priv) >= 4 ||
2985                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
2986 }
2987
2988 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
2989 {
2990         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2991         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2992         u32 tmp;
2993
2994         if (!i9xx_has_pfit(dev_priv))
2995                 return;
2996
2997         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
2998         if (!(tmp & PFIT_ENABLE))
2999                 return;
3000
3001         /* Check whether the pfit is attached to our pipe. */
3002         if (DISPLAY_VER(dev_priv) < 4) {
3003                 if (crtc->pipe != PIPE_B)
3004                         return;
3005         } else {
3006                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
3007                         return;
3008         }
3009
3010         crtc_state->gmch_pfit.control = tmp;
3011         crtc_state->gmch_pfit.pgm_ratios =
3012                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
3013 }
3014
/*
 * Read the VLV DPLL divider fields back via DPIO and compute
 * port_clock from them (100 MHz reference). Skipped for DSI, where
 * the DPLL is not used.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* unpack the m1/m2/n/p1/p2 dividers from the single mdiv word */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
3041
/*
 * Read the CHV DPLL divider fields back via DPIO and compute
 * port_clock from them (100 MHz reference). Skipped for DSI, where
 * the DPLL is not used.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* m2 integer part in DW0, fractional part in DW2 when enabled */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
3075
3076 static enum intel_output_format
3077 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3078 {
3079         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3080         u32 tmp;
3081
3082         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3083
3084         if (tmp & PIPEMISC_YUV420_ENABLE) {
3085                 /* We support 4:2:0 in full blend mode only */
3086                 drm_WARN_ON(&dev_priv->drm,
3087                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3088
3089                 return INTEL_OUTPUT_FORMAT_YCBCR420;
3090         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3091                 return INTEL_OUTPUT_FORMAT_YCBCR444;
3092         } else {
3093                 return INTEL_OUTPUT_FORMAT_RGB;
3094         }
3095 }
3096
3097 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3098 {
3099         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3100         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3101         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3102         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3103         u32 tmp;
3104
3105         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3106
3107         if (tmp & DISP_PIPE_GAMMA_ENABLE)
3108                 crtc_state->gamma_enable = true;
3109
3110         if (!HAS_GMCH(dev_priv) &&
3111             tmp & DISP_PIPE_CSC_ENABLE)
3112                 crtc_state->csc_enable = true;
3113 }
3114
/*
 * Read out the full pipe hardware state on GMCH-era platforms
 * (gen2-4, VLV, CHV) into @pipe_config.
 *
 * Returns true if the pipe is enabled and the state was read out,
 * false if the pipe (or its power domain) is off. Takes and releases
 * a wakeref on the pipe's power domain around the register accesses.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* pipe and transcoder map 1:1 on these platforms */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	/* Only these platforms expose the pipe bpc in TRANSCONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & TRANSCONF_BPC_MASK) {
		case TRANSCONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case TRANSCONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case TRANSCONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & TRANSCONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);

	/* stored in hardware as delay - 1 */
	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Read out the pixel multiplier; location depends on platform. */
	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Derive port_clock from the platform-specific PLL state. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3238
/*
 * Program TRANSCONF for ILK-BDW style transcoders: bpc, dithering,
 * interlace mode, color range/colorspace, gamma mode, and the frame
 * start / MSA timing delays.
 */
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (!intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	switch (crtc_state->pipe_bpp) {
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		MISSING_CASE(crtc_state->pipe_bpp);
		fallthrough;
	case 18:
		val |= TRANSCONF_BPC_6;
		break;
	case 24:
		val |= TRANSCONF_BPC_8;
		break;
	case 30:
		val |= TRANSCONF_BPC_10;
		break;
	case 36:
		val |= TRANSCONF_BPC_12;
		break;
	}

	if (crtc_state->dither)
		val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= TRANSCONF_INTERLACE_IF_ID_ILK;
	else
		val |= TRANSCONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* SDVO handles the limited range on its own, so skip the bit there. */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* hardware stores the delays as value - 1 */
	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
3302
3303 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3304 {
3305         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3306         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3307         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3308         u32 val = 0;
3309
3310         /*
3311          * - During modeset the pipe is still disabled and must remain so
3312          * - During fastset the pipe is already enabled and must remain so
3313          */
3314         if (!intel_crtc_needs_modeset(crtc_state))
3315                 val |= TRANSCONF_ENABLE;
3316
3317         if (IS_HASWELL(dev_priv) && crtc_state->dither)
3318                 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3319
3320         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3321                 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3322         else
3323                 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3324
3325         if (IS_HASWELL(dev_priv) &&
3326             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3327                 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
3328
3329         intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
3330         intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
3331 }
3332
/*
 * Program PIPEMISC for BDW+: pipe bpc, dithering, output colorspace
 * (YCbCr 4:4:4/4:2:0), and version-gated HDR/rounding bits.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_BPC_6;
		break;
	case 24:
		val |= PIPEMISC_BPC_8;
		break;
	case 30:
		val |= PIPEMISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* YUV colorspace applies to both 4:4:4 and 4:2:0 output */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 is only programmed in full blend mode here */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
3378
/*
 * Read back the pipe bpp from the PIPEMISC bpc field.
 *
 * Returns the bpp (3 * bpc) or 0 if the field holds an unknown value.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_BPC_6:
		return 18;
	case PIPEMISC_BPC_8:
		return 24;
	case PIPEMISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		/* pre-ADLP: the encoding is not 12 bpc, treat as unknown */
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
3412
3413 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3414 {
3415         /*
3416          * Account for spread spectrum to avoid
3417          * oversubscribing the link. Max center spread
3418          * is 2.5%; use 5% for safety's sake.
3419          */
3420         u32 bps = target_clock * bpp * 21 / 20;
3421         return DIV_ROUND_UP(bps, link_bw * 8);
3422 }
3423
/*
 * Read out a data/link M/N value set from the given register quartet.
 *
 * Each M/N value is masked with DATA_LINK_M_N_MASK. The TU (transfer
 * unit) size lives in the TU_SIZE field of the data M register and is
 * stored by hardware as size - 1, hence the + 1 on readout. Note the
 * data M register is intentionally read twice (value and TU field).
 */
void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}
3435
3436 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3437                                     enum transcoder transcoder,
3438                                     struct intel_link_m_n *m_n)
3439 {
3440         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3441         enum pipe pipe = crtc->pipe;
3442
3443         if (DISPLAY_VER(dev_priv) >= 5)
3444                 intel_get_m_n(dev_priv, m_n,
3445                               PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
3446                               PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
3447         else
3448                 intel_get_m_n(dev_priv, m_n,
3449                               PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3450                               PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3451 }
3452
3453 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3454                                     enum transcoder transcoder,
3455                                     struct intel_link_m_n *m_n)
3456 {
3457         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3458
3459         if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3460                 return;
3461
3462         intel_get_m_n(dev_priv, m_n,
3463                       PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
3464                       PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
3465 }
3466
3467 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3468                                   u32 pos, u32 size)
3469 {
3470         drm_rect_init(&crtc_state->pch_pfit.dst,
3471                       pos >> 16, pos & 0xffff,
3472                       size >> 16, size & 0xffff);
3473 }
3474
3475 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
3476 {
3477         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3478         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3479         struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
3480         int id = -1;
3481         int i;
3482
3483         /* find scaler attached to this pipe */
3484         for (i = 0; i < crtc->num_scalers; i++) {
3485                 u32 ctl, pos, size;
3486
3487                 ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
3488                 if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
3489                         continue;
3490
3491                 id = i;
3492                 crtc_state->pch_pfit.enabled = true;
3493
3494                 pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
3495                 size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
3496
3497                 ilk_get_pfit_pos_size(crtc_state, pos, size);
3498
3499                 scaler_state->scalers[i].in_use = true;
3500                 break;
3501         }
3502
3503         scaler_state->scaler_id = id;
3504         if (id >= 0)
3505                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
3506         else
3507                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
3508 }
3509
3510 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3511 {
3512         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3513         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3514         u32 ctl, pos, size;
3515
3516         ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3517         if ((ctl & PF_ENABLE) == 0)
3518                 return;
3519
3520         crtc_state->pch_pfit.enabled = true;
3521
3522         pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3523         size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3524
3525         ilk_get_pfit_pos_size(crtc_state, pos, size);
3526
3527         /*
3528          * We currently do not free assignements of panel fitters on
3529          * ivb/hsw (since we don't use the higher upscaling modes which
3530          * differentiates them) so just WARN about this case for now.
3531          */
3532         drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
3533                     (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
3534 }
3535
/*
 * Read out the full pipe hardware state on ILK-IVB style platforms
 * into @pipe_config.
 *
 * Returns true if the pipe is enabled and the state was read out,
 * false if the pipe (or its power domain) is off. Takes and releases
 * a wakeref on the pipe's power domain around the register accesses.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* pipe and transcoder map 1:1 on these platforms */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
	if (!(tmp & TRANSCONF_ENABLE))
		goto out;

	switch (tmp & TRANSCONF_BPC_MASK) {
	case TRANSCONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case TRANSCONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case TRANSCONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case TRANSCONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
	case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
	case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);

	/* stored in hardware as delay - 1 */
	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	/* no pixel multiplier support on these platforms */
	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3617
3618 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
3619 {
3620         u8 pipes;
3621
3622         if (DISPLAY_VER(i915) >= 12)
3623                 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3624         else if (DISPLAY_VER(i915) >= 11)
3625                 pipes = BIT(PIPE_B) | BIT(PIPE_C);
3626         else
3627                 pipes = 0;
3628
3629         return pipes & RUNTIME_INFO(i915)->pipe_mask;
3630 }
3631
3632 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
3633                                            enum transcoder cpu_transcoder)
3634 {
3635         enum intel_display_power_domain power_domain;
3636         intel_wakeref_t wakeref;
3637         u32 tmp = 0;
3638
3639         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3640
3641         with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3642                 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
3643
3644         return tmp & TRANS_DDI_FUNC_ENABLE;
3645 }
3646
/*
 * Read out which pipes currently act as bigjoiner masters and slaves,
 * covering both the DSC bigjoiner bits and (on ver 13+) the
 * uncompressed joiner bits in PIPE_DSS_CTL1.
 */
static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		/* DSC bigjoiner state, behind the DSC power domain */
		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			/*
			 * NOTE(review): this 'continue' terminates the
			 * with_intel_display_power_if_enabled() block
			 * (releasing the wakeref) and then falls through
			 * to the DISPLAY_VER check below rather than
			 * restarting the outer crtc loop — the net effect
			 * looks equivalent, but confirm against the macro
			 * definition.
			 */
			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		/* uncompressed joiner readout only applies on ver 13+ */
		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}
3693
3694 static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
3695 {
3696         if ((slave_pipes & BIT(pipe)) == 0)
3697                 return pipe;
3698
3699         /* ignore everything above our pipe */
3700         master_pipes &= ~GENMASK(7, pipe);
3701
3702         /* highest remaining bit should be our master pipe */
3703         return fls(master_pipes) - 1;
3704 }
3705
/*
 * Return the mask of slave pipes belonging to the same bigjoiner
 * configuration as @pipe: the slaves sitting between @pipe's master
 * and the next master pipe above it. Returns 0 if the computed master
 * is not actually enabled as a master.
 */
static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
{
	enum pipe master_pipe, next_master_pipe;

	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);

	if ((master_pipes & BIT(master_pipe)) == 0)
		return 0;

	/* ignore our master pipe and everything below it */
	master_pipes &= ~GENMASK(master_pipe, 0);
	/* make sure a high bit is set for the ffs() */
	master_pipes |= BIT(7);
	/* lowest remaining bit should be the next master pipe */
	next_master_pipe = ffs(master_pipes) - 1;

	/* slaves strictly between our master and the next master */
	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
}
3724
3725 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
3726 {
3727         u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
3728
3729         if (DISPLAY_VER(i915) >= 11)
3730                 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
3731
3732         return panel_transcoder_mask;
3733 }
3734
/*
 * Determine the set of CPU transcoders currently feeding @crtc's pipe:
 * panel transcoders (eDP/DSI) routed to this pipe, the pipe's own
 * transcoder, and — for a bigjoiner slave — the master pipe's
 * transcoder.
 *
 * Returns a bitmask of enum transcoder values.
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/* map the transcoder's EDP input select to a pipe */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}
3803
3804 static bool has_edp_transcoders(u8 enabled_transcoders)
3805 {
3806         return enabled_transcoders & BIT(TRANSCODER_EDP);
3807 }
3808
3809 static bool has_dsi_transcoders(u8 enabled_transcoders)
3810 {
3811         return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
3812                                       BIT(TRANSCODER_DSI_1));
3813 }
3814
3815 static bool has_pipe_transcoders(u8 enabled_transcoders)
3816 {
3817         return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
3818                                        BIT(TRANSCODER_DSI_0) |
3819                                        BIT(TRANSCODER_DSI_1));
3820 }
3821
/*
 * Sanity-check the readout result of hsw_enabled_transcoders(): a pipe
 * should be fed by at most one kind of transcoder, and only DSI
 * transcoders may have more than one enabled at a time.
 */
static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}
3836
/*
 * Read out which cpu transcoder is driving @crtc and whether it is
 * enabled. On success a power reference for the transcoder's power
 * domain is stashed in @power_domain_set (to be released later by
 * the caller, e.g. via intel_display_power_put_all_in_set()).
 *
 * Returns true iff the transcoder's power domain was up and the
 * transcoder itself is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     struct intel_display_power_domain_set *power_domain_set)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long enabled_transcoders;
        u32 tmp;

        enabled_transcoders = hsw_enabled_transcoders(crtc);
        if (!enabled_transcoders)
                return false;

        /* only ganged DSI may legitimately yield multiple transcoders */
        assert_enabled_transcoders(dev_priv, enabled_transcoders);

        /*
         * With the exception of DSI we should only ever have
         * a single enabled transcoder. With DSI let's just
         * pick the first one.
         */
        pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

        /* bail (without readout) if the transcoder power domain is down */
        if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
                                                       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
                return false;

        if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

                /*
                 * NOTE(review): "EDP input A on/off" appears to imply the
                 * panel transcoder is routed through pipe A and needs the
                 * pfit force_thru workaround -- confirm against bspec.
                 */
                if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
                        pipe_config->pch_pfit.force_thru = true;
        }

        tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));

        return tmp & TRANSCONF_ENABLE;
}
3874
/*
 * BXT/GLK DSI readout: figure out whether one of the DSI transcoders
 * is driving @crtc. On success the matching transcoder is stored in
 * @pipe_config->cpu_transcoder and a power reference for it is kept
 * in @power_domain_set (released later by the caller).
 *
 * Returns true iff a DSI transcoder was found for this crtc.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         struct intel_display_power_domain_set *power_domain_set)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder;
        enum port port;
        u32 tmp;

        /* DSI is only possible on ports A and C on these platforms */
        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                /* skip transcoders whose power domain is down */
                if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
                                                               POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
                        continue;

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* skip ports selected to drive a different pipe */
                tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
3920
/*
 * Read out the bigjoiner configuration for the crtc: if this pipe
 * participates in a bigjoiner configuration (as master or slave),
 * record the full set of involved pipes (master + slaves) in
 * @crtc_state->bigjoiner_pipes. Otherwise leave the state untouched.
 */
static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        u8 master_pipes, slave_pipes;
        enum pipe pipe = crtc->pipe;

        enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);

        /* nothing to do if this pipe is not part of a bigjoiner config */
        if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
                return;

        crtc_state->bigjoiner_pipes =
                BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
                get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
}
3937
/*
 * Fully read out the hardware state of @crtc into @pipe_config on
 * HSW+ platforms. The required power domain references are collected
 * in crtc->hw_readout_power_domains while reading and dropped again
 * before returning.
 *
 * Returns true iff the pipe is active.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool active;
        u32 tmp;

        /* nothing to read out if the pipe's power domain is down */
        if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
                                                       POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);

        /* BXT/GLK DSI transcoders are handled by a dedicated readout path */
        if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
                /* a pipe can't be driven by both a DDI and a DSI transcoder */
                drm_WARN_ON(&dev_priv->drm, active);
                active = true;
        }

        if (!active)
                goto out;

        intel_dsc_get_config(pipe_config);
        intel_bigjoiner_get_config(pipe_config);

        /*
         * NOTE(review): timings are skipped for DSI transcoders on
         * pre-ICL platforms -- confirm the reason against bspec/git log.
         */
        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            DISPLAY_VER(dev_priv) >= 11)
                intel_get_transcoder_timings(crtc, pipe_config);

        if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
                intel_vrr_get_config(crtc, pipe_config);

        intel_get_pipe_src_size(crtc, pipe_config);

        /* output format: HSW keeps it in TRANSCONF, BDW+ elsewhere */
        if (IS_HASWELL(dev_priv)) {
                u32 tmp = intel_de_read(dev_priv,
                                        TRANSCONF(pipe_config->cpu_transcoder));

                if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipemisc_output_format(crtc);
        }

        pipe_config->gamma_mode = intel_de_read(dev_priv,
                                                GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = intel_de_read(dev_priv,
                                              PIPE_CSC_MODE(crtc->pipe));

        /* gamma/csc enable bits moved to SKL_BOTTOM_COLOR on gen9+ */
        if (DISPLAY_VER(dev_priv) >= 9) {
                tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
        pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
        if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                pipe_config->ips_linetime =
                        REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

        /* pfit state is only readable while its own power domain is up */
        if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
                                                      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
                if (DISPLAY_VER(dev_priv) >= 9)
                        skl_get_pfit_config(pipe_config);
                else
                        ilk_get_pfit_config(pipe_config);
        }

        hsw_ips_get_config(pipe_config);

        /* pixel multiplier only exists for non-panel transcoders */
        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        intel_de_read(dev_priv,
                                      TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                /* MTL moved the chicken register; pick the right one */
                tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
                                    MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
                                    CHICKEN_TRANS(pipe_config->cpu_transcoder));

                pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
        } else {
                /* no idea if this is correct */
                pipe_config->framestart_delay = 1;
        }

out:
        /* drop all power references taken during the readout */
        intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);

        return active;
}
4048
/*
 * Read out the full hardware state of @crtc_state's crtc via the
 * platform specific ->get_pipe_config() hook and fill in the derived
 * software state.
 *
 * Returns true iff the pipe is active.
 */
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);

        /* the hook returns false when the pipe is not active */
        if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
                return false;

        crtc_state->hw.active = true;

        intel_crtc_readout_derived_state(crtc_state);

        return true;
}
4063
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
4069
4070 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4071                                         struct drm_crtc *crtc)
4072 {
4073         struct drm_plane *plane;
4074         struct drm_plane_state *plane_state;
4075         int ret, i;
4076
4077         ret = drm_atomic_add_affected_planes(state, crtc);
4078         if (ret)
4079                 return ret;
4080
4081         for_each_new_plane_in_state(state, plane, plane_state, i) {
4082                 if (plane_state->crtc != crtc)
4083                         continue;
4084
4085                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4086                 if (ret)
4087                         return ret;
4088
4089                 drm_atomic_set_fb_for_plane(plane_state, NULL);
4090         }
4091
4092         return 0;
4093 }
4094
/*
 * Route @connector to a crtc (its currently assigned one, or the
 * first unused one that can drive it) and commit a 640x480 load
 * detect mode on it. A duplicate of the previous state is stored in
 * @old->restore_state so intel_release_load_detect_pipe() can restore
 * it afterwards.
 *
 * Returns true (1) on success, false (0) on failure, or -EDEADLK if
 * the acquire @ctx needs to be backed off and the operation retried.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct intel_crtc *possible_crtc;
        struct intel_crtc *crtc = NULL;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret;

        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.base.id, encoder->base.name);

        old->restore_state = NULL;

        /* the caller must already hold the connection mutex */
        drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = to_intel_crtc(connector->state->crtc);

                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_intel_crtc(dev, possible_crtc) {
                /* skip crtcs the encoder cannot drive */
                if (!(encoder->base.possible_crtcs &
                      drm_crtc_mask(&possible_crtc->base)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
                if (ret)
                        goto fail;

                /* already enabled crtcs are in use, keep looking */
                if (possible_crtc->base.state->enable) {
                        drm_modeset_unlock(&possible_crtc->base.mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        /* one state to commit the load-detect mode, one to restore later */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->uapi.active = true;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
                                           &load_detect_mode);
        if (ret)
                goto fail;

        /* load detect runs with all planes off */
        ret = intel_modeset_disable_planes(state, &crtc->base);
        if (ret)
                goto fail;

        /* snapshot the current state so we can restore it afterwards */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Failed to create a copy of old state to restore: %i\n",
                            ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to set mode on load-detect pipe\n");
                goto fail;
        }

        /* restore_state ownership transfers to @old on success */
        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_crtc_wait_for_next_vblank(crtc);

        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* propagate deadlock so the caller can back off and retry */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
4250
/*
 * Undo intel_get_load_detect_pipe(): commit the duplicated state
 * saved in @old->restore_state, restoring whatever configuration the
 * crtc/connector had before load detection, then free that state.
 * A no-op if no state was saved.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old,
                                    struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_atomic_state *state = old->restore_state;
        int ret;

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.id, encoder->name);

        /* nothing to restore if load detection never took the pipe */
        if (!state)
                return;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
        if (ret)
                drm_dbg_kms(&i915->drm,
                            "Couldn't release load detect pipe: %i\n", ret);
        drm_atomic_state_put(state);
}
4275
4276 static int i9xx_pll_refclk(struct drm_device *dev,
4277                            const struct intel_crtc_state *pipe_config)
4278 {
4279         struct drm_i915_private *dev_priv = to_i915(dev);
4280         u32 dpll = pipe_config->dpll_hw_state.dpll;
4281
4282         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4283                 return dev_priv->display.vbt.lvds_ssc_freq;
4284         else if (HAS_PCH_SPLIT(dev_priv))
4285                 return 120000;
4286         else if (DISPLAY_VER(dev_priv) != 2)
4287                 return 96000;
4288         else
4289                 return 48000;
4290 }
4291
/*
 * Returns the clock of the currently programmed mode of the given
 * pipe, decoded from the DPLL/FP register values previously read out
 * into @pipe_config->dpll_hw_state. The result (which includes the
 * pixel multiplier) is stored in @pipe_config->port_clock.
 */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                         struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* pick the active FP divider register (FP0 vs FP1) */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                /* PNV encodes N as a one-hot bit, hence the ffs() */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (DISPLAY_VER(dev_priv) != 2) {
                /* P1 is stored as a one-hot bitfield on these platforms */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unknown DPLL mode %08x in programmed "
                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                enum pipe lvds_pipe;

                /* gen2: LVDS on i85x uses a different P1/P2 encoding */
                if (IS_I85X(dev_priv) &&
                    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
                    lvds_pipe == crtc->pipe) {
                        u32 lvds = intel_de_read(dev_priv, LVDS);

                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
4384
4385 int intel_dotclock_calculate(int link_freq,
4386                              const struct intel_link_m_n *m_n)
4387 {
4388         /*
4389          * The calculation for the data clock is:
4390          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4391          * But we want to avoid losing precison if possible, so:
4392          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4393          *
4394          * and the link clock is simpler:
4395          * link_clock = (m * link_clock) / n
4396          */
4397
4398         if (!m_n->link_n)
4399                 return 0;
4400
4401         return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
4402                                 m_n->link_n);
4403 }
4404
/*
 * Compute the pipe dotclock (in kHz) from the readout state.
 */
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
        int dotclock;

        if (intel_crtc_has_dp_encoder(pipe_config))
                dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                                    &pipe_config->dp_m_n);
        else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
                /* HDMI with >24 bpp: scale the port clock down by bpp/24 */
                dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
                                             pipe_config->pipe_bpp);
        else
                dotclock = pipe_config->port_clock;

        /* non-DP YCbCr 4:2:0: dotclock is twice the port clock */
        if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
            !intel_crtc_has_dp_encoder(pipe_config))
                dotclock *= 2;

        if (pipe_config->pixel_multiplier)
                dotclock /= pipe_config->pixel_multiplier;

        return dotclock;
}
4427
/*
 * Returns the currently programmed mode of the given encoder, read
 * back from the hardware, or NULL if the encoder is disabled or an
 * allocation/readout fails. The returned mode is kzalloc'ed and the
 * caller is responsible for freeing it.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc_state *crtc_state;
        struct drm_display_mode *mode;
        struct intel_crtc *crtc;
        enum pipe pipe;

        /* disabled encoder -> no mode to report */
        if (!encoder->get_hw_state(encoder, &pipe))
                return NULL;

        crtc = intel_crtc_for_pipe(dev_priv, pipe);

        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
        if (!mode)
                return NULL;

        /* temporary crtc state used only for this readout */
        crtc_state = intel_crtc_state_alloc(crtc);
        if (!crtc_state) {
                kfree(mode);
                return NULL;
        }

        if (!intel_crtc_get_pipe_config(crtc_state)) {
                kfree(crtc_state);
                kfree(mode);
                return NULL;
        }

        intel_encoder_get_config(encoder, crtc_state);

        intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);

        kfree(crtc_state);

        return mode;
}
4467
4468 static bool encoders_cloneable(const struct intel_encoder *a,
4469                                const struct intel_encoder *b)
4470 {
4471         /* masks could be asymmetric, so check both ways */
4472         return a == b || (a->cloneable & BIT(b->type) &&
4473                           b->cloneable & BIT(a->type));
4474 }
4475
4476 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
4477                                          struct intel_crtc *crtc,
4478                                          struct intel_encoder *encoder)
4479 {
4480         struct intel_encoder *source_encoder;
4481         struct drm_connector *connector;
4482         struct drm_connector_state *connector_state;
4483         int i;
4484
4485         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4486                 if (connector_state->crtc != &crtc->base)
4487                         continue;
4488
4489                 source_encoder =
4490                         to_intel_encoder(connector_state->best_encoder);
4491                 if (!encoders_cloneable(encoder, source_encoder))
4492                         return false;
4493         }
4494
4495         return true;
4496 }
4497
/*
 * For every plane in @state that has a planar (YUV) linked plane,
 * pull the linked plane into the state too, so both halves of the
 * link are updated together. Also sanity check that each link is
 * mutual and that exactly one side is the slave.
 *
 * Returns 0 on success or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state, *linked_plane_state;
        int i;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                linked = plane_state->planar_linked_plane;

                if (!linked)
                        continue;

                linked_plane_state = intel_atomic_get_plane_state(state, linked);
                if (IS_ERR(linked_plane_state))
                        return PTR_ERR(linked_plane_state);

                /* the link must point back at us ... */
                drm_WARN_ON(state->base.dev,
                            linked_plane_state->planar_linked_plane != plane);
                /* ... and exactly one side must be the slave */
                drm_WARN_ON(state->base.dev,
                            linked_plane_state->planar_slave == plane_state->planar_slave);
        }

        return 0;
}
4522
/*
 * ICL+ planar (NV12) handling: for every plane on this crtc that
 * scans out an NV12 framebuffer, find an unused Y plane and link the
 * two together, copying the relevant hw state to the Y (slave) plane.
 * Pre-existing links for this crtc are torn down first.
 *
 * Returns 0 on success, -EINVAL if not enough Y planes are free, or
 * another negative error code.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        /* plane linking only exists on ICL+ */
        if (DISPLAY_VER(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        /* unlinked invisible slave: drop it from all masks */
                        crtc_state->enabled_planes &= ~BIT(plane->id);
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                        crtc_state->data_rate[plane->id] = 0;
                        crtc_state->rel_data_rate[plane->id] = 0;
                }

                plane_state->planar_slave = false;
        }

        /* no NV12 planes -> nothing left to link */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* find a free Y-capable plane on this crtc */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(dev_priv, linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Need %d free Y planes for planar YUV\n",
                                    hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->enabled_planes |= BIT(linked->id);
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                /* slave inherits the Y component's data rate */
                crtc_state->data_rate[linked->id] =
                        crtc_state->data_rate_y[plane->id];
                crtc_state->rel_data_rate[linked->id] =
                        crtc_state->rel_data_rate_y[plane->id];
                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
                            linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->view = plane_state->view;
                linked_state->decrypt = plane_state->decrypt;

                intel_plane_copy_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                /* HDR planes additionally encode the Y plane in cus_ctl */
                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
                        else if (linked->id == PLANE_SPRITE3)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
                        else if (linked->id == PLANE_SPRITE2)
                                plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
4627
4628 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
4629 {
4630         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
4631         struct intel_atomic_state *state =
4632                 to_intel_atomic_state(new_crtc_state->uapi.state);
4633         const struct intel_crtc_state *old_crtc_state =
4634                 intel_atomic_get_old_crtc_state(state, crtc);
4635
4636         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
4637 }
4638
4639 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4640 {
4641         const struct drm_display_mode *pipe_mode =
4642                 &crtc_state->hw.pipe_mode;
4643         int linetime_wm;
4644
4645         if (!crtc_state->hw.enable)
4646                 return 0;
4647
4648         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4649                                         pipe_mode->crtc_clock);
4650
4651         return min(linetime_wm, 0x1ff);
4652 }
4653
4654 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4655                                const struct intel_cdclk_state *cdclk_state)
4656 {
4657         const struct drm_display_mode *pipe_mode =
4658                 &crtc_state->hw.pipe_mode;
4659         int linetime_wm;
4660
4661         if (!crtc_state->hw.enable)
4662                 return 0;
4663
4664         linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4665                                         cdclk_state->logical.cdclk);
4666
4667         return min(linetime_wm, 0x1ff);
4668 }
4669
4670 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4671 {
4672         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4673         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4674         const struct drm_display_mode *pipe_mode =
4675                 &crtc_state->hw.pipe_mode;
4676         int linetime_wm;
4677
4678         if (!crtc_state->hw.enable)
4679                 return 0;
4680
4681         linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4682                                    crtc_state->pixel_rate);
4683
4684         /* Display WA #1135: BXT:ALL GLK:ALL */
4685         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4686             skl_watermark_ipc_enabled(dev_priv))
4687                 linetime_wm /= 2;
4688
4689         return min(linetime_wm, 0x1ff);
4690 }
4691
4692 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4693                                    struct intel_crtc *crtc)
4694 {
4695         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4696         struct intel_crtc_state *crtc_state =
4697                 intel_atomic_get_new_crtc_state(state, crtc);
4698         const struct intel_cdclk_state *cdclk_state;
4699
4700         if (DISPLAY_VER(dev_priv) >= 9)
4701                 crtc_state->linetime = skl_linetime_wm(crtc_state);
4702         else
4703                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
4704
4705         if (!hsw_crtc_supports_ips(crtc))
4706                 return 0;
4707
4708         cdclk_state = intel_atomic_get_cdclk_state(state);
4709         if (IS_ERR(cdclk_state))
4710                 return PTR_ERR(cdclk_state);
4711
4712         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
4713                                                        cdclk_state);
4714
4715         return 0;
4716 }
4717
/*
 * Per-crtc atomic check: validate the new crtc state and derive all the
 * dependent per-pipe state (DPLLs, color management, watermarks, scalers,
 * IPS, linetime, PSR2 selective fetch).
 *
 * Returns 0 on success or a negative errno; a non-zero return fails the
 * whole atomic commit check phase.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	/*
	 * On pre-ilk (except g4x) a modeset that disables the pipe requires
	 * a post-commit watermark update.
	 */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    intel_crtc_needs_modeset(crtc_state) &&
	    !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* A full modeset may need a (possibly shared) DPLL assigned. */
	if (intel_crtc_needs_modeset(crtc_state)) {
		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (intel_crtc_needs_color_update(crtc_state)) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	/* Compute the watermarks for the final (post-commit) state. */
	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state.  We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	/* skl+: validate the pipe scaler and (re)assign scaler hardware. */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (intel_crtc_needs_modeset(crtc_state) ||
		    intel_crtc_needs_fastset(crtc_state)) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	/* Linetime watermarks exist on hsw/bdw and skl+. */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}
4802
4803 static int
4804 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
4805                       struct intel_crtc_state *crtc_state)
4806 {
4807         struct drm_connector *connector = conn_state->connector;
4808         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4809         const struct drm_display_info *info = &connector->display_info;
4810         int bpp;
4811
4812         switch (conn_state->max_bpc) {
4813         case 6 ... 7:
4814                 bpp = 6 * 3;
4815                 break;
4816         case 8 ... 9:
4817                 bpp = 8 * 3;
4818                 break;
4819         case 10 ... 11:
4820                 bpp = 10 * 3;
4821                 break;
4822         case 12 ... 16:
4823                 bpp = 12 * 3;
4824                 break;
4825         default:
4826                 MISSING_CASE(conn_state->max_bpc);
4827                 return -EINVAL;
4828         }
4829
4830         if (bpp < crtc_state->pipe_bpp) {
4831                 drm_dbg_kms(&i915->drm,
4832                             "[CONNECTOR:%d:%s] Limiting display bpp to %d "
4833                             "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
4834                             connector->base.id, connector->name,
4835                             bpp, 3 * info->bpc,
4836                             3 * conn_state->max_requested_bpc,
4837                             crtc_state->pipe_bpp);
4838
4839                 crtc_state->pipe_bpp = bpp;
4840         }
4841
4842         return 0;
4843 }
4844
4845 static int
4846 compute_baseline_pipe_bpp(struct intel_atomic_state *state,
4847                           struct intel_crtc *crtc)
4848 {
4849         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4850         struct intel_crtc_state *crtc_state =
4851                 intel_atomic_get_new_crtc_state(state, crtc);
4852         struct drm_connector *connector;
4853         struct drm_connector_state *connector_state;
4854         int bpp, i;
4855
4856         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4857             IS_CHERRYVIEW(dev_priv)))
4858                 bpp = 10*3;
4859         else if (DISPLAY_VER(dev_priv) >= 5)
4860                 bpp = 12*3;
4861         else
4862                 bpp = 8*3;
4863
4864         crtc_state->pipe_bpp = bpp;
4865
4866         /* Clamp display bpp to connector max bpp */
4867         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4868                 int ret;
4869
4870                 if (connector_state->crtc != &crtc->base)
4871                         continue;
4872
4873                 ret = compute_sink_pipe_bpp(connector_state, crtc_state);
4874                 if (ret)
4875                         return ret;
4876         }
4877
4878         return 0;
4879 }
4880
4881 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
4882 {
4883         struct drm_device *dev = state->base.dev;
4884         struct drm_connector *connector;
4885         struct drm_connector_list_iter conn_iter;
4886         unsigned int used_ports = 0;
4887         unsigned int used_mst_ports = 0;
4888         bool ret = true;
4889
4890         /*
4891          * We're going to peek into connector->state,
4892          * hence connection_mutex must be held.
4893          */
4894         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
4895
4896         /*
4897          * Walk the connector list instead of the encoder
4898          * list to detect the problem on ddi platforms
4899          * where there's just one encoder per digital port.
4900          */
4901         drm_connector_list_iter_begin(dev, &conn_iter);
4902         drm_for_each_connector_iter(connector, &conn_iter) {
4903                 struct drm_connector_state *connector_state;
4904                 struct intel_encoder *encoder;
4905
4906                 connector_state =
4907                         drm_atomic_get_new_connector_state(&state->base,
4908                                                            connector);
4909                 if (!connector_state)
4910                         connector_state = connector->state;
4911
4912                 if (!connector_state->best_encoder)
4913                         continue;
4914
4915                 encoder = to_intel_encoder(connector_state->best_encoder);
4916
4917                 drm_WARN_ON(dev, !connector_state->crtc);
4918
4919                 switch (encoder->type) {
4920                 case INTEL_OUTPUT_DDI:
4921                         if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
4922                                 break;
4923                         fallthrough;
4924                 case INTEL_OUTPUT_DP:
4925                 case INTEL_OUTPUT_HDMI:
4926                 case INTEL_OUTPUT_EDP:
4927                         /* the same port mustn't appear more than once */
4928                         if (used_ports & BIT(encoder->port))
4929                                 ret = false;
4930
4931                         used_ports |= BIT(encoder->port);
4932                         break;
4933                 case INTEL_OUTPUT_DP_MST:
4934                         used_mst_ports |=
4935                                 1 << encoder->port;
4936                         break;
4937                 default:
4938                         break;
4939                 }
4940         }
4941         drm_connector_list_iter_end(&conn_iter);
4942
4943         /* can't mix MST and SST/HDMI on the same port */
4944         if (used_ports & used_mst_ports)
4945                 return false;
4946
4947         return ret;
4948 }
4949
4950 static void
4951 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
4952                                            struct intel_crtc *crtc)
4953 {
4954         struct intel_crtc_state *crtc_state =
4955                 intel_atomic_get_new_crtc_state(state, crtc);
4956
4957         WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
4958
4959         drm_property_replace_blob(&crtc_state->hw.degamma_lut,
4960                                   crtc_state->uapi.degamma_lut);
4961         drm_property_replace_blob(&crtc_state->hw.gamma_lut,
4962                                   crtc_state->uapi.gamma_lut);
4963         drm_property_replace_blob(&crtc_state->hw.ctm,
4964                                   crtc_state->uapi.ctm);
4965 }
4966
4967 static void
4968 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
4969                                          struct intel_crtc *crtc)
4970 {
4971         struct intel_crtc_state *crtc_state =
4972                 intel_atomic_get_new_crtc_state(state, crtc);
4973
4974         WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
4975
4976         crtc_state->hw.enable = crtc_state->uapi.enable;
4977         crtc_state->hw.active = crtc_state->uapi.active;
4978         drm_mode_copy(&crtc_state->hw.mode,
4979                       &crtc_state->uapi.mode);
4980         drm_mode_copy(&crtc_state->hw.adjusted_mode,
4981                       &crtc_state->uapi.adjusted_mode);
4982         crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
4983
4984         intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
4985 }
4986
4987 static void
4988 copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
4989                                     struct intel_crtc *slave_crtc)
4990 {
4991         struct intel_crtc_state *slave_crtc_state =
4992                 intel_atomic_get_new_crtc_state(state, slave_crtc);
4993         struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
4994         const struct intel_crtc_state *master_crtc_state =
4995                 intel_atomic_get_new_crtc_state(state, master_crtc);
4996
4997         drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
4998                                   master_crtc_state->hw.degamma_lut);
4999         drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
5000                                   master_crtc_state->hw.gamma_lut);
5001         drm_property_replace_blob(&slave_crtc_state->hw.ctm,
5002                                   master_crtc_state->hw.ctm);
5003
5004         slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
5005 }
5006
5007 static int
5008 copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
5009                                   struct intel_crtc *slave_crtc)
5010 {
5011         struct intel_crtc_state *slave_crtc_state =
5012                 intel_atomic_get_new_crtc_state(state, slave_crtc);
5013         struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
5014         const struct intel_crtc_state *master_crtc_state =
5015                 intel_atomic_get_new_crtc_state(state, master_crtc);
5016         struct intel_crtc_state *saved_state;
5017
5018         WARN_ON(master_crtc_state->bigjoiner_pipes !=
5019                 slave_crtc_state->bigjoiner_pipes);
5020
5021         saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
5022         if (!saved_state)
5023                 return -ENOMEM;
5024
5025         /* preserve some things from the slave's original crtc state */
5026         saved_state->uapi = slave_crtc_state->uapi;
5027         saved_state->scaler_state = slave_crtc_state->scaler_state;
5028         saved_state->shared_dpll = slave_crtc_state->shared_dpll;
5029         saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
5030         saved_state->crc_enabled = slave_crtc_state->crc_enabled;
5031
5032         intel_crtc_free_hw_state(slave_crtc_state);
5033         memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
5034         kfree(saved_state);
5035
5036         /* Re-init hw state */
5037         memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
5038         slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
5039         slave_crtc_state->hw.active = master_crtc_state->hw.active;
5040         drm_mode_copy(&slave_crtc_state->hw.mode,
5041                       &master_crtc_state->hw.mode);
5042         drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
5043                       &master_crtc_state->hw.pipe_mode);
5044         drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
5045                       &master_crtc_state->hw.adjusted_mode);
5046         slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
5047
5048         copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
5049
5050         slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
5051         slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
5052         slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
5053
5054         WARN_ON(master_crtc_state->bigjoiner_pipes !=
5055                 slave_crtc_state->bigjoiner_pipes);
5056
5057         return 0;
5058 }
5059
/*
 * Reset the crtc state to freshly-allocated defaults while preserving
 * the members that must survive across a modeset recompute (uapi state,
 * scaler state, DPLL bookkeeping, CRC enable, and - on g4x/vlv/chv -
 * the watermark state), then re-derive the hw state from uapi.
 *
 * Returns 0 on success, -ENOMEM if the scratch state cannot be
 * allocated.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* g4x/vlv/chv watermarks are tracked in the crtc state */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Wholesale-replace the old state with the cleared+preserved copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}
5099
/*
 * Compute the full pipe configuration for a modeset on @crtc: sanitize
 * the requested mode, establish the baseline pipe bpp, and let each
 * encoder on the crtc adjust (or reject) the configuration. If the crtc
 * level compute step reports -EAGAIN (bandwidth constrained), the
 * encoder compute step is retried exactly once.
 *
 * Returns 0 on success, -EDEADLK for lock contention (caller must
 * back off and retry the whole atomic check), or another negative errno
 * on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;
	bool retry = true;

	/* Default to the transcoder with the same index as the pipe. */
	crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;

	crtc_state->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(crtc_state->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(state, crtc);
	if (ret)
		return ret;

	/* Remember the pre-encoder bpp for the debug message at the end. */
	base_bpp = crtc_state->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&crtc_state->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&crtc_state->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		if (!check_single_encoder_cloning(state, crtc, encoder)) {
			drm_dbg_kms(&i915->drm,
				    "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
				    encoder->base.base.id, encoder->base.name);
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			crtc_state->output_types |=
				BIT(encoder->compute_output_type(encoder, crtc_state,
								 connector_state));
		else
			crtc_state->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = encoder->compute_config(encoder, crtc_state,
					      connector_state);
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
				    encoder->base.base.id, encoder->base.name, ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!crtc_state->port_clock)
		crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
			* crtc_state->pixel_multiplier;

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EDEADLK)
		return ret;
	/* -EAGAIN means bandwidth constrained: retry the encoders once. */
	if (ret == -EAGAIN) {
		if (drm_WARN(&i915->drm, !retry,
			     "[CRTC:%d:%s] loop in pipe configuration computation\n",
			     crtc->base.base.id, crtc->base.name))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
			    crtc->base.base.id, crtc->base.name);
		retry = false;
		goto encoder_retry;
	}
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
			    crtc->base.base.id, crtc->base.name, ret);
		return ret;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
		!crtc_state->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    crtc->base.base.id, crtc->base.name,
		    base_bpp, crtc_state->pipe_bpp, crtc_state->dither);

	return 0;
}
5245
5246 static int
5247 intel_modeset_pipe_config_late(struct intel_atomic_state *state,
5248                                struct intel_crtc *crtc)
5249 {
5250         struct intel_crtc_state *crtc_state =
5251                 intel_atomic_get_new_crtc_state(state, crtc);
5252         struct drm_connector_state *conn_state;
5253         struct drm_connector *connector;
5254         int i;
5255
5256         intel_bigjoiner_adjust_pipe_src(crtc_state);
5257
5258         for_each_new_connector_in_state(&state->base, connector,
5259                                         conn_state, i) {
5260                 struct intel_encoder *encoder =
5261                         to_intel_encoder(conn_state->best_encoder);
5262                 int ret;
5263
5264                 if (conn_state->crtc != &crtc->base ||
5265                     !encoder->compute_config_late)
5266                         continue;
5267
5268                 ret = encoder->compute_config_late(encoder, crtc_state,
5269                                                    conn_state);
5270                 if (ret)
5271                         return ret;
5272         }
5273
5274         return 0;
5275 }
5276
/*
 * Compare two clocks (in kHz) with a small tolerance: the relative
 * difference must stay below roughly 5% of the combined clocks.
 * Zero is only considered equal to zero.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	/* A zero clock never fuzzy-matches a non-zero one. */
	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
5294
5295 static bool
5296 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
5297                        const struct intel_link_m_n *m2_n2)
5298 {
5299         return m_n->tu == m2_n2->tu &&
5300                 m_n->data_m == m2_n2->data_m &&
5301                 m_n->data_n == m2_n2->data_n &&
5302                 m_n->link_m == m2_n2->link_m &&
5303                 m_n->link_n == m2_n2->link_n;
5304 }
5305
5306 static bool
5307 intel_compare_infoframe(const union hdmi_infoframe *a,
5308                         const union hdmi_infoframe *b)
5309 {
5310         return memcmp(a, b, sizeof(*a)) == 0;
5311 }
5312
5313 static bool
5314 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
5315                          const struct drm_dp_vsc_sdp *b)
5316 {
5317         return memcmp(a, b, sizeof(*a)) == 0;
5318 }
5319
5320 static bool
5321 intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
5322 {
5323         return memcmp(a, b, len) == 0;
5324 }
5325
5326 static void
5327 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
5328                                bool fastset, const char *name,
5329                                const union hdmi_infoframe *a,
5330                                const union hdmi_infoframe *b)
5331 {
5332         if (fastset) {
5333                 if (!drm_debug_enabled(DRM_UT_KMS))
5334                         return;
5335
5336                 drm_dbg_kms(&dev_priv->drm,
5337                             "fastset mismatch in %s infoframe\n", name);
5338                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5339                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
5340                 drm_dbg_kms(&dev_priv->drm, "found:\n");
5341                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
5342         } else {
5343                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
5344                 drm_err(&dev_priv->drm, "expected:\n");
5345                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
5346                 drm_err(&dev_priv->drm, "found:\n");
5347                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
5348         }
5349 }
5350
5351 static void
5352 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
5353                                 bool fastset, const char *name,
5354                                 const struct drm_dp_vsc_sdp *a,
5355                                 const struct drm_dp_vsc_sdp *b)
5356 {
5357         if (fastset) {
5358                 if (!drm_debug_enabled(DRM_UT_KMS))
5359                         return;
5360
5361                 drm_dbg_kms(&dev_priv->drm,
5362                             "fastset mismatch in %s dp sdp\n", name);
5363                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
5364                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
5365                 drm_dbg_kms(&dev_priv->drm, "found:\n");
5366                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
5367         } else {
5368                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
5369                 drm_err(&dev_priv->drm, "expected:\n");
5370                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
5371                 drm_err(&dev_priv->drm, "found:\n");
5372                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
5373         }
5374 }
5375
5376 /* Returns the length up to and including the last differing byte */
5377 static size_t
5378 memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
5379 {
5380         int i;
5381
5382         for (i = len - 1; i >= 0; i--) {
5383                 if (a[i] != b[i])
5384                         return i + 1;
5385         }
5386
5387         return 0;
5388 }
5389
5390 static void
5391 pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
5392                             bool fastset, const char *name,
5393                             const u8 *a, const u8 *b, size_t len)
5394 {
5395         if (fastset) {
5396                 if (!drm_debug_enabled(DRM_UT_KMS))
5397                         return;
5398
5399                 /* only dump up to the last difference */
5400                 len = memcmp_diff_len(a, b, len);
5401
5402                 drm_dbg_kms(&dev_priv->drm,
5403                             "fastset mismatch in %s buffer\n", name);
5404                 print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
5405                                16, 0, a, len, false);
5406                 print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
5407                                16, 0, b, len, false);
5408         } else {
5409                 /* only dump up to the last difference */
5410                 len = memcmp_diff_len(a, b, len);
5411
5412                 drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
5413                 print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
5414                                16, 0, a, len, false);
5415                 print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
5416                                16, 0, b, len, false);
5417         }
5418 }
5419
5420 static void __printf(4, 5)
5421 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
5422                      const char *name, const char *format, ...)
5423 {
5424         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5425         struct va_format vaf;
5426         va_list args;
5427
5428         va_start(args, format);
5429         vaf.fmt = format;
5430         vaf.va = &args;
5431
5432         if (fastset)
5433                 drm_dbg_kms(&i915->drm,
5434                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
5435                             crtc->base.base.id, crtc->base.name, name, &vaf);
5436         else
5437                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
5438                         crtc->base.base.id, crtc->base.name, name, &vaf);
5439
5440         va_end(args);
5441 }
5442
5443 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
5444 {
5445         if (dev_priv->params.fastboot != -1)
5446                 return dev_priv->params.fastboot;
5447
5448         /* Enable fastboot by default on Skylake and newer */
5449         if (DISPLAY_VER(dev_priv) >= 9)
5450                 return true;
5451
5452         /* Enable fastboot by default on VLV and CHV */
5453         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5454                 return true;
5455
5456         /* Disabled by default on all others */
5457         return false;
5458 }
5459
/**
 * intel_pipe_config_compare - compare two crtc states
 * @current_config: the expected (software) crtc state
 * @pipe_config: the crtc state to compare against (hardware readout or
 *               a proposed new state)
 * @fastset: if true, log mismatches at debug level only and skip the
 *           state that is allowed to change without a full modeset
 *
 * Returns: true when the two states match, false on any mismatch.
 * Every mismatch found is logged; checking continues past the first
 * failure so all differences are reported.
 */
bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
                          const struct intel_crtc_state *pipe_config,
                          bool fastset)
{
        struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        bool ret = true;
        /* fastset on top of the firmware-inherited boot state? */
        bool fixup_inherited = fastset &&
                current_config->inherited && !pipe_config->inherited;

        if (fixup_inherited && !fastboot_enabled(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm,
                            "initial modeset and fastboot not set\n");
                ret = false;
        }

/*
 * Comparison macros: each checks one field (or group of fields) of the
 * two states, logs any difference via the matching *_mismatch() helper
 * and clears ret, then falls through to the next check.
 */
#define PIPE_CONF_CHECK_X(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
        if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     current_config->name & (mask), \
                                     pipe_config->name & (mask)); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc,  __stringify(name), \
                                     "(expected %s, found %s)", \
                                     str_yes_no(current_config->name), \
                                     str_yes_no(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
        if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
                PIPE_CONF_CHECK_BOOL(name); \
        } else { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
                                     str_yes_no(current_config->name), \
                                     str_yes_no(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %p, found %p)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected tu %i data %i/%i link %i/%i, " \
                                     "found tu %i, data %i/%i link %i/%i)", \
                                     current_config->name.tu, \
                                     current_config->name.data_m, \
                                     current_config->name.data_n, \
                                     current_config->name.link_m, \
                                     current_config->name.link_n, \
                                     pipe_config->name.tu, \
                                     pipe_config->name.data_m, \
                                     pipe_config->name.data_n, \
                                     pipe_config->name.link_m, \
                                     pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_TIMINGS(name) do { \
        PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
        PIPE_CONF_CHECK_I(name.crtc_htotal); \
        PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
        PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
        PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
        PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
        PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
        PIPE_CONF_CHECK_I(name.crtc_vtotal); \
        PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
        PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
        PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
        PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
} while (0)

#define PIPE_CONF_CHECK_RECT(name) do { \
        PIPE_CONF_CHECK_I(name.x1); \
        PIPE_CONF_CHECK_I(name.x2); \
        PIPE_CONF_CHECK_I(name.y1); \
        PIPE_CONF_CHECK_I(name.y2); \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(%x) (expected %i, found %i)", \
                                     (mask), \
                                     current_config->name & (mask), \
                                     pipe_config->name & (mask)); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
        if (!intel_compare_infoframe(&current_config->infoframes.name, \
                                     &pipe_config->infoframes.name)) { \
                pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
                                               &current_config->infoframes.name, \
                                               &pipe_config->infoframes.name); \
                ret = false; \
        } \
} while (0)

/* The VSC SDP check is skipped entirely when either state uses PSR. */
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
        if (!current_config->has_psr && !pipe_config->has_psr && \
            !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
                                      &pipe_config->infoframes.name)) { \
                pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
                                                &current_config->infoframes.name, \
                                                &pipe_config->infoframes.name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
        BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
        BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
        if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
                pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
                                            current_config->name, \
                                            pipe_config->name, \
                                            (len)); \
                ret = false; \
        } \
} while (0)

/* LUTs are only compared when the gamma modes already agree. */
#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
        if (current_config->gamma_mode == pipe_config->gamma_mode && \
            !intel_color_lut_equal(current_config, \
                                   current_config->lut, pipe_config->lut, \
                                   is_pre_csc_lut)) {   \
                pipe_config_mismatch(fastset, crtc, __stringify(lut), \
                                     "hw_state doesn't match sw_state"); \
                ret = false; \
        } \
} while (0)

/* True when either state has the given quirk set. */
#define PIPE_CONF_QUIRK(quirk) \
        ((current_config->quirks | pipe_config->quirks) & (quirk))

        PIPE_CONF_CHECK_I(hw.enable);
        PIPE_CONF_CHECK_I(hw.active);

        PIPE_CONF_CHECK_I(cpu_transcoder);
        PIPE_CONF_CHECK_I(mst_master_transcoder);

        PIPE_CONF_CHECK_BOOL(has_pch_encoder);
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_M_N(fdi_m_n);

        PIPE_CONF_CHECK_I(lane_count);
        PIPE_CONF_CHECK_X(lane_lat_optim_mask);

        if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
                if (!fastset || !pipe_config->seamless_m_n)
                        PIPE_CONF_CHECK_M_N(dp_m_n);
        } else {
                PIPE_CONF_CHECK_M_N(dp_m_n);
                PIPE_CONF_CHECK_M_N(dp_m2_n2);
        }

        PIPE_CONF_CHECK_X(output_types);

        PIPE_CONF_CHECK_I(framestart_delay);
        PIPE_CONF_CHECK_I(msa_timing_delay);

        PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
        PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);

        PIPE_CONF_CHECK_I(pixel_multiplier);

        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                              DRM_MODE_FLAG_INTERLACE);

        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PHSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NHSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PVSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NVSYNC);
        }

        PIPE_CONF_CHECK_I(output_format);
        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
        if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                PIPE_CONF_CHECK_BOOL(limited_color_range);

        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
        PIPE_CONF_CHECK_BOOL(has_infoframe);
        PIPE_CONF_CHECK_BOOL(fec_enable);

        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
        PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);

        PIPE_CONF_CHECK_X(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
        if (DISPLAY_VER(dev_priv) < 4)
                PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

        /*
         * Changing the EDP transcoder input mux
         * (A_ONOFF vs. A_ON) requires a full modeset.
         */
        PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

        /* Fields below here are allowed to change during a fastset. */
        if (!fastset) {
                PIPE_CONF_CHECK_RECT(pipe_src);

                PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
                PIPE_CONF_CHECK_RECT(pch_pfit.dst);

                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
                PIPE_CONF_CHECK_I(pixel_rate);

                PIPE_CONF_CHECK_X(gamma_mode);
                if (IS_CHERRYVIEW(dev_priv))
                        PIPE_CONF_CHECK_X(cgm_mode);
                else
                        PIPE_CONF_CHECK_X(csc_mode);
                PIPE_CONF_CHECK_BOOL(gamma_enable);
                PIPE_CONF_CHECK_BOOL(csc_enable);

                PIPE_CONF_CHECK_I(linetime);
                PIPE_CONF_CHECK_I(ips_linetime);

                PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
                PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);

                if (current_config->active_planes) {
                        PIPE_CONF_CHECK_BOOL(has_psr);
                        PIPE_CONF_CHECK_BOOL(has_psr2);
                        PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
                        PIPE_CONF_CHECK_I(dc3co_exitline);
                }
        }

        PIPE_CONF_CHECK_BOOL(double_wide);

        if (dev_priv->display.dpll.mgr) {
                PIPE_CONF_CHECK_P(shared_dpll);

                PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
                PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
                PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
                PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
                PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
                PIPE_CONF_CHECK_X(dpll_hw_state.spll);
                PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
                PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
                PIPE_CONF_CHECK_X(dpll_hw_state.div0);
                PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
                PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
                PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
                PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
        }

        PIPE_CONF_CHECK_X(dsi_pll.ctrl);
        PIPE_CONF_CHECK_X(dsi_pll.div);

        if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);

        if (!fastset || !pipe_config->seamless_m_n) {
                PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
                PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
        }
        PIPE_CONF_CHECK_I(port_clock);

        PIPE_CONF_CHECK_I(min_voltage_level);

        /* With PSR the VSC SDP enable bit is ignored in the comparison. */
        if (current_config->has_psr || pipe_config->has_psr)
                PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
                                            ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
        else
                PIPE_CONF_CHECK_X(infoframes.enable);

        PIPE_CONF_CHECK_X(infoframes.gcp);
        PIPE_CONF_CHECK_INFOFRAME(avi);
        PIPE_CONF_CHECK_INFOFRAME(spd);
        PIPE_CONF_CHECK_INFOFRAME(hdmi);
        PIPE_CONF_CHECK_INFOFRAME(drm);
        PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

        PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
        PIPE_CONF_CHECK_I(master_transcoder);
        PIPE_CONF_CHECK_X(bigjoiner_pipes);

        PIPE_CONF_CHECK_I(dsc.compression_enable);
        PIPE_CONF_CHECK_I(dsc.dsc_split);
        PIPE_CONF_CHECK_I(dsc.compressed_bpp);

        PIPE_CONF_CHECK_BOOL(splitter.enable);
        PIPE_CONF_CHECK_I(splitter.link_count);
        PIPE_CONF_CHECK_I(splitter.pixel_overlap);

        PIPE_CONF_CHECK_BOOL(vrr.enable);
        PIPE_CONF_CHECK_I(vrr.vmin);
        PIPE_CONF_CHECK_I(vrr.vmax);
        PIPE_CONF_CHECK_I(vrr.flipline);
        PIPE_CONF_CHECK_I(vrr.pipeline_full);
        PIPE_CONF_CHECK_I(vrr.guardband);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
#undef PIPE_CONF_QUIRK

        return ret;
}
5844
/*
 * Sanity-check every plane in the atomic state: each one must either be
 * a planar (NV12 UV) slave or be actually visible, asserted via
 * assert_plane().
 */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
        struct intel_plane *plane;
        const struct intel_plane_state *plane_state;
        int i;

        for_each_new_intel_plane_in_state(state, plane,
                                          plane_state, i)
                assert_plane(plane, plane_state->planar_slave ||
                             plane_state->uapi.visible);
}
5857
/**
 * intel_modeset_all_pipes - force a full modeset on every active pipe
 * @state: the atomic state
 * @reason: human-readable reason for the modeset, logged per crtc
 *
 * Adds every crtc to @state and flags a full modeset on each one that
 * is active and not already being modeset, pulling in the affected
 * connectors, DP MST topology state and planes as well.
 *
 * Returns: 0 on success, negative error code on failure.
 */
int intel_modeset_all_pipes(struct intel_atomic_state *state,
                            const char *reason)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        /*
         * Add all pipes to the state, and force
         * a modeset on all the active ones.
         */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state;
                int ret;

                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);

                /* Inactive or already-modeset crtcs need no forcing. */
                if (!crtc_state->hw.active ||
                    intel_crtc_needs_modeset(crtc_state))
                        continue;

                drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
                            crtc->base.base.id, crtc->base.name, reason);

                crtc_state->uapi.mode_changed = true;
                crtc_state->update_pipe = false;

                ret = drm_atomic_add_affected_connectors(&state->base,
                                                         &crtc->base);
                if (ret)
                        return ret;

                ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
                if (ret)
                        return ret;

                ret = intel_atomic_add_affected_planes(state, crtc);
                if (ret)
                        return ret;

                /* A full modeset updates all planes; async flips are off. */
                crtc_state->update_planes |= crtc_state->active_planes;
                crtc_state->async_flip_planes = 0;
                crtc_state->do_async_flip = false;
        }

        return 0;
}
5906
/*
 * Update the crtc's vblank timestamping constants, mode flags and
 * scanline counter offset from the committed crtc state. With VRR
 * enabled, the timestamping constants are derived from the vmax-based
 * timings instead of the nominal mode.
 */
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_display_mode adjusted_mode;

        drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);

        if (crtc_state->vrr.enable) {
                /* Stretch the vertical timings out to the VRR maximum. */
                adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
                adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
                crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
        }

        drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

        crtc->mode_flags = crtc_state->mode_flags;

        /*
         * The scanline counter increments at the leading edge of hsync.
         *
         * On most platforms it starts counting from vtotal-1 on the
         * first active line. That means the scanline counter value is
         * always one less than what we would expect. Ie. just after
         * start of vblank, which also occurs at start of hsync (on the
         * last active line), the scanline counter will read vblank_start-1.
         *
         * On gen2 the scanline counter starts counting from 1 instead
         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
         * to keep the value positive), instead of adding one.
         *
         * On HSW+ the behaviour of the scanline counter depends on the output
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
         *
         * On VLV/CHV DSI the scanline counter would appear to increment
         * approx. 1/3 of a scanline before start of vblank. Unfortunately
         * that means we can't tell whether we're in vblank or not while
         * we're on that particular line. We must still set scanline_offset
         * to 1 so that the vblank timestamps come out correct when we query
         * the scanline counter from within the vblank interrupt handler.
         * However if queried just before the start of vblank we'll get an
         * answer that's slightly in the future.
         */
        if (DISPLAY_VER(dev_priv) == 2) {
                int vtotal;

                vtotal = adjusted_mode.crtc_vtotal;
                if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                        vtotal /= 2;

                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else {
                crtc->scanline_offset = 1;
        }
}
5968
5969 /*
5970  * This implements the workaround described in the "notes" section of the mode
5971  * set sequence documentation. When going from no pipes or single pipe to
5972  * multiple pipes, and planes are enabled after the pipe, we need to wait at
5973  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
5974  */
5975 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
5976 {
5977         struct intel_crtc_state *crtc_state;
5978         struct intel_crtc *crtc;
5979         struct intel_crtc_state *first_crtc_state = NULL;
5980         struct intel_crtc_state *other_crtc_state = NULL;
5981         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
5982         int i;
5983
5984         /* look at all crtc's that are going to be enabled in during modeset */
5985         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5986                 if (!crtc_state->hw.active ||
5987                     !intel_crtc_needs_modeset(crtc_state))
5988                         continue;
5989
5990                 if (first_crtc_state) {
5991                         other_crtc_state = crtc_state;
5992                         break;
5993                 } else {
5994                         first_crtc_state = crtc_state;
5995                         first_pipe = crtc->pipe;
5996                 }
5997         }
5998
5999         /* No workaround needed? */
6000         if (!first_crtc_state)
6001                 return 0;
6002
6003         /* w/a possibly needed, check how many crtc's are already enabled. */
6004         for_each_intel_crtc(state->base.dev, crtc) {
6005                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6006                 if (IS_ERR(crtc_state))
6007                         return PTR_ERR(crtc_state);
6008
6009                 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
6010
6011                 if (!crtc_state->hw.active ||
6012                     intel_crtc_needs_modeset(crtc_state))
6013                         continue;
6014
6015                 /* 2 or more enabled crtcs means no need for w/a */
6016                 if (enabled_pipe != INVALID_PIPE)
6017                         return 0;
6018
6019                 enabled_pipe = crtc->pipe;
6020         }
6021
6022         if (enabled_pipe != INVALID_PIPE)
6023                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
6024         else if (other_crtc_state)
6025                 other_crtc_state->hsw_workaround_pipe = first_pipe;
6026
6027         return 0;
6028 }
6029
6030 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
6031                            u8 active_pipes)
6032 {
6033         const struct intel_crtc_state *crtc_state;
6034         struct intel_crtc *crtc;
6035         int i;
6036
6037         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6038                 if (crtc_state->hw.active)
6039                         active_pipes |= BIT(crtc->pipe);
6040                 else
6041                         active_pipes &= ~BIT(crtc->pipe);
6042         }
6043
6044         return active_pipes;
6045 }
6046
6047 static int intel_modeset_checks(struct intel_atomic_state *state)
6048 {
6049         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6050
6051         state->modeset = true;
6052
6053         if (IS_HASWELL(dev_priv))
6054                 return hsw_mode_set_planes_workaround(state);
6055
6056         return 0;
6057 }
6058
6059 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
6060                                      struct intel_crtc_state *new_crtc_state)
6061 {
6062         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
6063                 return;
6064
6065         new_crtc_state->uapi.mode_changed = false;
6066         if (!intel_crtc_needs_modeset(new_crtc_state))
6067                 new_crtc_state->update_pipe = true;
6068 }
6069
6070 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
6071                                           struct intel_crtc *crtc,
6072                                           u8 plane_ids_mask)
6073 {
6074         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6075         struct intel_plane *plane;
6076
6077         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6078                 struct intel_plane_state *plane_state;
6079
6080                 if ((plane_ids_mask & BIT(plane->id)) == 0)
6081                         continue;
6082
6083                 plane_state = intel_atomic_get_plane_state(state, plane);
6084                 if (IS_ERR(plane_state))
6085                         return PTR_ERR(plane_state);
6086         }
6087
6088         return 0;
6089 }
6090
6091 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
6092                                      struct intel_crtc *crtc)
6093 {
6094         const struct intel_crtc_state *old_crtc_state =
6095                 intel_atomic_get_old_crtc_state(state, crtc);
6096         const struct intel_crtc_state *new_crtc_state =
6097                 intel_atomic_get_new_crtc_state(state, crtc);
6098
6099         return intel_crtc_add_planes_to_state(state, crtc,
6100                                               old_crtc_state->enabled_planes |
6101                                               new_crtc_state->enabled_planes);
6102 }
6103
6104 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
6105 {
6106         /* See {hsw,vlv,ivb}_plane_ratio() */
6107         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
6108                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
6109                 IS_IVYBRIDGE(dev_priv);
6110 }
6111
6112 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
6113                                            struct intel_crtc *crtc,
6114                                            struct intel_crtc *other)
6115 {
6116         const struct intel_plane_state *plane_state;
6117         struct intel_plane *plane;
6118         u8 plane_ids = 0;
6119         int i;
6120
6121         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6122                 if (plane->pipe == crtc->pipe)
6123                         plane_ids |= BIT(plane->id);
6124         }
6125
6126         return intel_crtc_add_planes_to_state(state, other, plane_ids);
6127 }
6128
6129 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
6130 {
6131         struct drm_i915_private *i915 = to_i915(state->base.dev);
6132         const struct intel_crtc_state *crtc_state;
6133         struct intel_crtc *crtc;
6134         int i;
6135
6136         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6137                 struct intel_crtc *other;
6138
6139                 for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
6140                                                  crtc_state->bigjoiner_pipes) {
6141                         int ret;
6142
6143                         if (crtc == other)
6144                                 continue;
6145
6146                         ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
6147                         if (ret)
6148                                 return ret;
6149                 }
6150         }
6151
6152         return 0;
6153 }
6154
/*
 * Run the plane-level atomic checks: settle ICL Y/UV plane links and
 * bigjoiner plane mirroring first (both may add planes to the state),
 * then run the per-plane driver check, and finally add any extra planes
 * needed for the min cdclk computation on affected platforms.
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_plane_state *plane_state;
        struct intel_plane *plane;
        struct intel_crtc *crtc;
        int i, ret;

        /* May add linked planes to the state; must run before the per-plane checks. */
        ret = icl_add_linked_planes(state);
        if (ret)
                return ret;

        /* Mirror plane updates across bigjoined pipes before checking them. */
        ret = intel_bigjoiner_add_affected_planes(state);
        if (ret)
                return ret;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                ret = intel_plane_atomic_check(state, plane);
                if (ret) {
                        drm_dbg_atomic(&dev_priv->drm,
                                       "[PLANE:%d:%s] atomic driver check failed\n",
                                       plane->base.base.id, plane->base.name);
                        return ret;
                }
        }

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                u8 old_active_planes, new_active_planes;

                ret = icl_check_nv12_planes(new_crtc_state);
                if (ret)
                        return ret;

                /*
                 * On some platforms the number of active planes affects
                 * the planes' minimum cdclk calculation. Add such planes
                 * to the state before we compute the minimum cdclk.
                 */
                if (!active_planes_affects_min_cdclk(dev_priv))
                        continue;

                /* The cursor plane does not count towards the plane ratio. */
                old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
                new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

                if (hweight8(old_active_planes) == hweight8(new_active_planes))
                        continue;

                ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
                if (ret)
                        return ret;
        }

        return 0;
}
6211
6212 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
6213 {
6214         struct intel_crtc_state *crtc_state;
6215         struct intel_crtc *crtc;
6216         int i;
6217
6218         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6219                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6220                 int ret;
6221
6222                 ret = intel_crtc_atomic_check(state, crtc);
6223                 if (ret) {
6224                         drm_dbg_atomic(&i915->drm,
6225                                        "[CRTC:%d:%s] atomic driver check failed\n",
6226                                        crtc->base.base.id, crtc->base.name);
6227                         return ret;
6228                 }
6229         }
6230
6231         return 0;
6232 }
6233
6234 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
6235                                                u8 transcoders)
6236 {
6237         const struct intel_crtc_state *new_crtc_state;
6238         struct intel_crtc *crtc;
6239         int i;
6240
6241         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6242                 if (new_crtc_state->hw.enable &&
6243                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
6244                     intel_crtc_needs_modeset(new_crtc_state))
6245                         return true;
6246         }
6247
6248         return false;
6249 }
6250
6251 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
6252                                      u8 pipes)
6253 {
6254         const struct intel_crtc_state *new_crtc_state;
6255         struct intel_crtc *crtc;
6256         int i;
6257
6258         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6259                 if (new_crtc_state->hw.enable &&
6260                     pipes & BIT(crtc->pipe) &&
6261                     intel_crtc_needs_modeset(new_crtc_state))
6262                         return true;
6263         }
6264
6265         return false;
6266 }
6267
/*
 * Validate and set up the bigjoiner configuration for @master_crtc:
 * sanity check the pipe masks, claim each slave crtc (which must not be
 * enabled as a normal crtc via uapi), and copy the master's modeset
 * state onto the slaves. Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
                                        struct intel_crtc *master_crtc)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_crtc_state *master_crtc_state =
                intel_atomic_get_new_crtc_state(state, master_crtc);
        struct intel_crtc *slave_crtc;

        /* Nothing to do unless this crtc is part of a bigjoiner config. */
        if (!master_crtc_state->bigjoiner_pipes)
                return 0;

        /* sanity check */
        if (drm_WARN_ON(&i915->drm,
                        master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
                return -EINVAL;

        /* The requested pipes must be a subset of the hw-capable ones. */
        if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
                drm_dbg_kms(&i915->drm,
                            "[CRTC:%d:%s] Cannot act as big joiner master "
                            "(need 0x%x as pipes, only 0x%x possible)\n",
                            master_crtc->base.base.id, master_crtc->base.name,
                            master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
                return -EINVAL;
        }

        for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
                                         intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
                struct intel_crtc_state *slave_crtc_state;
                int ret;

                /* Pulls the slave crtc into the atomic state. */
                slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
                if (IS_ERR(slave_crtc_state))
                        return PTR_ERR(slave_crtc_state);

                /* master being enabled, slave was already configured? */
                if (slave_crtc_state->uapi.enable) {
                        drm_dbg_kms(&i915->drm,
                                    "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
                                    "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
                                    slave_crtc->base.base.id, slave_crtc->base.name,
                                    master_crtc->base.base.id, master_crtc->base.name);
                        return -EINVAL;
                }

                /*
                 * The state copy logic assumes the master crtc gets processed
                 * before the slave crtc during the main compute_config loop.
                 * This works because the crtcs are created in pipe order,
                 * and the hardware requires master pipe < slave pipe as well.
                 * Should that change we need to rethink the logic.
                 */
                if (WARN_ON(drm_crtc_index(&master_crtc->base) >
                            drm_crtc_index(&slave_crtc->base)))
                        return -EINVAL;

                drm_dbg_kms(&i915->drm,
                            "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
                            slave_crtc->base.base.id, slave_crtc->base.name,
                            master_crtc->base.base.id, master_crtc->base.name);

                /* Slaves share the master's pipe mask. */
                slave_crtc_state->bigjoiner_pipes =
                        master_crtc_state->bigjoiner_pipes;

                ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
                if (ret)
                        return ret;
        }

        return 0;
}
6338
/*
 * Tear down the bigjoiner link of @master_crtc: detach every slave and
 * turn it back into a normal crtc by resyncing its hw state from its
 * own uapi state, then clear the master's pipe mask.
 */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
                                 struct intel_crtc *master_crtc)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_crtc_state *master_crtc_state =
                intel_atomic_get_new_crtc_state(state, master_crtc);
        struct intel_crtc *slave_crtc;

        for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
                                         intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
                struct intel_crtc_state *slave_crtc_state =
                        intel_atomic_get_new_crtc_state(state, slave_crtc);

                slave_crtc_state->bigjoiner_pipes = 0;

                intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
        }

        /* Clear the master last; the loop above reads its pipe mask. */
        master_crtc_state->bigjoiner_pipes = 0;
}
6359
6360 /**
6361  * DOC: asynchronous flip implementation
6362  *
6363  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
6364  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
6365  * Correspondingly, support is currently added for primary plane only.
6366  *
6367  * Async flip can only change the plane surface address, so anything else
6368  * changing is rejected from the intel_async_flip_check_hw() function.
6369  * Once this check is cleared, flip done interrupt is enabled using
6370  * the intel_crtc_enable_flip_done() function.
6371  *
6372  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
6374  * handler itself. The timestamp and sequence sent during the flip done event
6375  * correspond to the last vblank and have no relation to the actual time when
6376  * the flip done event was sent.
6377  */
/*
 * Validate the uapi side of an async flip request for @crtc: the crtc
 * must be active and not require a modeset, and every plane of the crtc
 * in the state must support async flips and have both an old and a new
 * framebuffer. Returns 0 when the request is acceptable, -EINVAL
 * otherwise (with a debug message naming the offending object).
 */
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
                                       struct intel_crtc *crtc)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        const struct intel_plane_state *old_plane_state;
        struct intel_plane_state *new_plane_state;
        struct intel_plane *plane;
        int i;

        /* Not an async flip request? Nothing to check. */
        if (!new_crtc_state->uapi.async_flip)
                return 0;

        if (!new_crtc_state->uapi.active) {
                drm_dbg_kms(&i915->drm,
                            "[CRTC:%d:%s] not active\n",
                            crtc->base.base.id, crtc->base.name);
                return -EINVAL;
        }

        if (intel_crtc_needs_modeset(new_crtc_state)) {
                drm_dbg_kms(&i915->drm,
                            "[CRTC:%d:%s] modeset required\n",
                            crtc->base.base.id, crtc->base.name);
                return -EINVAL;
        }

        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
                                             new_plane_state, i) {
                /* Only the planes of this crtc are of interest. */
                if (plane->pipe != crtc->pipe)
                        continue;

                /*
                 * TODO: Async flip is only supported through the page flip IOCTL
                 * as of now. So support currently added for primary plane only.
                 * Support for other planes on platforms on which supports
                 * this(vlv/chv and icl+) should be added when async flip is
                 * enabled in the atomic IOCTL path.
                 */
                if (!plane->async_flip) {
                        drm_dbg_kms(&i915->drm,
                                    "[PLANE:%d:%s] async flip not supported\n",
                                    plane->base.base.id, plane->base.name);
                        return -EINVAL;
                }

                if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
                        drm_dbg_kms(&i915->drm,
                                    "[PLANE:%d:%s] no old or new framebuffer\n",
                                    plane->base.base.id, plane->base.name);
                        return -EINVAL;
                }
        }

        return 0;
}
6435
6436 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
6437 {
6438         struct drm_i915_private *i915 = to_i915(state->base.dev);
6439         const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6440         const struct intel_plane_state *new_plane_state, *old_plane_state;
6441         struct intel_plane *plane;
6442         int i;
6443
6444         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6445         new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6446
6447         if (!new_crtc_state->uapi.async_flip)
6448                 return 0;
6449
6450         if (!new_crtc_state->hw.active) {
6451                 drm_dbg_kms(&i915->drm,
6452                             "[CRTC:%d:%s] not active\n",
6453                             crtc->base.base.id, crtc->base.name);
6454                 return -EINVAL;
6455         }
6456
6457         if (intel_crtc_needs_modeset(new_crtc_state)) {
6458                 drm_dbg_kms(&i915->drm,
6459                             "[CRTC:%d:%s] modeset required\n",
6460                             crtc->base.base.id, crtc->base.name);
6461                 return -EINVAL;
6462         }
6463
6464         if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6465                 drm_dbg_kms(&i915->drm,
6466                             "[CRTC:%d:%s] Active planes cannot be in async flip\n",
6467                             crtc->base.base.id, crtc->base.name);
6468                 return -EINVAL;
6469         }
6470
6471         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6472                                              new_plane_state, i) {
6473                 if (plane->pipe != crtc->pipe)
6474                         continue;
6475
6476                 /*
6477                  * Only async flip capable planes should be in the state
6478                  * if we're really about to ask the hardware to perform
6479                  * an async flip. We should never get this far otherwise.
6480                  */
6481                 if (drm_WARN_ON(&i915->drm,
6482                                 new_crtc_state->do_async_flip && !plane->async_flip))
6483                         return -EINVAL;
6484
6485                 /*
6486                  * Only check async flip capable planes other planes
6487                  * may be involved in the initial commit due to
6488                  * the wm0/ddb optimization.
6489                  *
6490                  * TODO maybe should track which planes actually
6491                  * were requested to do the async flip...
6492                  */
6493                 if (!plane->async_flip)
6494                         continue;
6495
6496                 /*
6497                  * FIXME: This check is kept generic for all platforms.
6498                  * Need to verify this for all gen9 platforms to enable
6499                  * this selectively if required.
6500                  */
6501                 switch (new_plane_state->hw.fb->modifier) {
6502                 case I915_FORMAT_MOD_X_TILED:
6503                 case I915_FORMAT_MOD_Y_TILED:
6504                 case I915_FORMAT_MOD_Yf_TILED:
6505                 case I915_FORMAT_MOD_4_TILED:
6506                         break;
6507                 default:
6508                         drm_dbg_kms(&i915->drm,
6509                                     "[PLANE:%d:%s] Modifier does not support async flips\n",
6510                                     plane->base.base.id, plane->base.name);
6511                         return -EINVAL;
6512                 }
6513
6514                 if (new_plane_state->hw.fb->format->num_planes > 1) {
6515                         drm_dbg_kms(&i915->drm,
6516                                     "[PLANE:%d:%s] Planar formats do not support async flips\n",
6517                                     plane->base.base.id, plane->base.name);
6518                         return -EINVAL;
6519                 }
6520
6521                 if (old_plane_state->view.color_plane[0].mapping_stride !=
6522                     new_plane_state->view.color_plane[0].mapping_stride) {
6523                         drm_dbg_kms(&i915->drm,
6524                                     "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6525                                     plane->base.base.id, plane->base.name);
6526                         return -EINVAL;
6527                 }
6528
6529                 if (old_plane_state->hw.fb->modifier !=
6530                     new_plane_state->hw.fb->modifier) {
6531                         drm_dbg_kms(&i915->drm,
6532                                     "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6533                                     plane->base.base.id, plane->base.name);
6534                         return -EINVAL;
6535                 }
6536
6537                 if (old_plane_state->hw.fb->format !=
6538                     new_plane_state->hw.fb->format) {
6539                         drm_dbg_kms(&i915->drm,
6540                                     "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6541                                     plane->base.base.id, plane->base.name);
6542                         return -EINVAL;
6543                 }
6544
6545                 if (old_plane_state->hw.rotation !=
6546                     new_plane_state->hw.rotation) {
6547                         drm_dbg_kms(&i915->drm,
6548                                     "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6549                                     plane->base.base.id, plane->base.name);
6550                         return -EINVAL;
6551                 }
6552
6553                 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6554                     !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6555                         drm_dbg_kms(&i915->drm,
6556                                     "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
6557                                     plane->base.base.id, plane->base.name);
6558                         return -EINVAL;
6559                 }
6560
6561                 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6562                         drm_dbg_kms(&i915->drm,
6563                                     "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
6564                                     plane->base.base.id, plane->base.name);
6565                         return -EINVAL;
6566                 }
6567
6568                 if (old_plane_state->hw.pixel_blend_mode !=
6569                     new_plane_state->hw.pixel_blend_mode) {
6570                         drm_dbg_kms(&i915->drm,
6571                                     "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6572                                     plane->base.base.id, plane->base.name);
6573                         return -EINVAL;
6574                 }
6575
6576                 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6577                         drm_dbg_kms(&i915->drm,
6578                                     "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6579                                     plane->base.base.id, plane->base.name);
6580                         return -EINVAL;
6581                 }
6582
6583                 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6584                         drm_dbg_kms(&i915->drm,
6585                                     "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6586                                     plane->base.base.id, plane->base.name);
6587                         return -EINVAL;
6588                 }
6589
6590                 /* plane decryption is allow to change only in synchronous flips */
6591                 if (old_plane_state->decrypt != new_plane_state->decrypt) {
6592                         drm_dbg_kms(&i915->drm,
6593                                     "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6594                                     plane->base.base.id, plane->base.name);
6595                         return -EINVAL;
6596                 }
6597         }
6598
6599         return 0;
6600 }
6601
/*
 * Pull all crtcs linked by bigjoiner into the atomic state: add every
 * affected pipe, force a modeset (and pull in connectors/planes) on
 * pipes joined to a crtc that itself needs a modeset, and finally kill
 * the old bigjoiner links of modesetting masters so they can be
 * re-established. Returns 0 on success or a negative error code.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_crtc_state *crtc_state;
        struct intel_crtc *crtc;
        u8 affected_pipes = 0;
        u8 modeset_pipes = 0;
        int i;

        /* Gather the joined pipes, and which of them need a modeset. */
        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
                affected_pipes |= crtc_state->bigjoiner_pipes;
                if (intel_crtc_needs_modeset(crtc_state))
                        modeset_pipes |= crtc_state->bigjoiner_pipes;
        }

        /* Add every joined crtc to the state. */
        for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
        }

        for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
                int ret;

                crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                /* A modeset on one joined pipe forces a modeset on all of them. */
                crtc_state->uapi.mode_changed = true;

                ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
                if (ret)
                        return ret;

                ret = intel_atomic_add_affected_planes(state, crtc);
                if (ret)
                        return ret;
        }

        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
                /* Kill old bigjoiner link, we may re-establish afterwards */
                if (intel_crtc_needs_modeset(crtc_state) &&
                    intel_crtc_is_bigjoiner_master(crtc_state))
                        kill_bigjoiner_slave(state, crtc);
        }

        return 0;
}
6648
6649 /**
6650  * intel_atomic_check - validate state object
6651  * @dev: drm device
6652  * @_state: state to validate
6653  */
6654 int intel_atomic_check(struct drm_device *dev,
6655                        struct drm_atomic_state *_state)
6656 {
6657         struct drm_i915_private *dev_priv = to_i915(dev);
6658         struct intel_atomic_state *state = to_intel_atomic_state(_state);
6659         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6660         struct intel_crtc *crtc;
6661         int ret, i;
6662         bool any_ms = false;
6663
6664         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6665                                             new_crtc_state, i) {
6666                 if (new_crtc_state->inherited != old_crtc_state->inherited)
6667                         new_crtc_state->uapi.mode_changed = true;
6668
6669                 if (new_crtc_state->uapi.scaling_filter !=
6670                     old_crtc_state->uapi.scaling_filter)
6671                         new_crtc_state->uapi.mode_changed = true;
6672         }
6673
6674         intel_vrr_check_modeset(state);
6675
6676         ret = drm_atomic_helper_check_modeset(dev, &state->base);
6677         if (ret)
6678                 goto fail;
6679
6680         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6681                 ret = intel_async_flip_check_uapi(state, crtc);
6682                 if (ret)
6683                         return ret;
6684         }
6685
6686         ret = intel_bigjoiner_add_affected_crtcs(state);
6687         if (ret)
6688                 goto fail;
6689
6690         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6691                                             new_crtc_state, i) {
6692                 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6693                         if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
6694                                 copy_bigjoiner_crtc_state_nomodeset(state, crtc);
6695                         else
6696                                 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6697                         continue;
6698                 }
6699
6700                 if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
6701                         drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
6702                         continue;
6703                 }
6704
6705                 ret = intel_crtc_prepare_cleared_state(state, crtc);
6706                 if (ret)
6707                         goto fail;
6708
6709                 if (!new_crtc_state->hw.enable)
6710                         continue;
6711
6712                 ret = intel_modeset_pipe_config(state, crtc);
6713                 if (ret)
6714                         goto fail;
6715
6716                 ret = intel_atomic_check_bigjoiner(state, crtc);
6717                 if (ret)
6718                         goto fail;
6719         }
6720
6721         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6722                                             new_crtc_state, i) {
6723                 if (!intel_crtc_needs_modeset(new_crtc_state))
6724                         continue;
6725
6726                 if (new_crtc_state->hw.enable) {
6727                         ret = intel_modeset_pipe_config_late(state, crtc);
6728                         if (ret)
6729                                 goto fail;
6730                 }
6731
6732                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6733         }
6734
        /*
         * Check if fastset is allowed by external dependencies like other
         * pipes and transcoders.
         *
         * Right now it only forces a fullmodeset when the MST master
         * transcoder did not change but the pipe of the master transcoder
         * needs a fullmodeset so all slaves also needs to do a fullmodeset or
         * in case of port synced crtcs, if one of the synced crtcs
         * needs a full modeset, all other synced crtcs should be
         * forced a full modeset.
         */
6746         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6747                 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6748                         continue;
6749
6750                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6751                         enum transcoder master = new_crtc_state->mst_master_transcoder;
6752
6753                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
6754                                 new_crtc_state->uapi.mode_changed = true;
6755                                 new_crtc_state->update_pipe = false;
6756                         }
6757                 }
6758
6759                 if (is_trans_port_sync_mode(new_crtc_state)) {
6760                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
6761
6762                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6763                                 trans |= BIT(new_crtc_state->master_transcoder);
6764
6765                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
6766                                 new_crtc_state->uapi.mode_changed = true;
6767                                 new_crtc_state->update_pipe = false;
6768                         }
6769                 }
6770
6771                 if (new_crtc_state->bigjoiner_pipes) {
6772                         if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
6773                                 new_crtc_state->uapi.mode_changed = true;
6774                                 new_crtc_state->update_pipe = false;
6775                         }
6776                 }
6777         }
6778
6779         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6780                                             new_crtc_state, i) {
6781                 if (!intel_crtc_needs_modeset(new_crtc_state))
6782                         continue;
6783
6784                 any_ms = true;
6785
6786                 intel_release_shared_dplls(state, crtc);
6787         }
6788
6789         if (any_ms && !check_digital_port_conflicts(state)) {
6790                 drm_dbg_kms(&dev_priv->drm,
6791                             "rejecting conflicting digital port configuration\n");
6792                 ret = -EINVAL;
6793                 goto fail;
6794         }
6795
6796         ret = drm_dp_mst_atomic_check(&state->base);
6797         if (ret)
6798                 goto fail;
6799
6800         ret = intel_atomic_check_planes(state);
6801         if (ret)
6802                 goto fail;
6803
6804         ret = intel_compute_global_watermarks(state);
6805         if (ret)
6806                 goto fail;
6807
6808         ret = intel_bw_atomic_check(state);
6809         if (ret)
6810                 goto fail;
6811
6812         ret = intel_cdclk_atomic_check(state, &any_ms);
6813         if (ret)
6814                 goto fail;
6815
6816         if (intel_any_crtc_needs_modeset(state))
6817                 any_ms = true;
6818
6819         if (any_ms) {
6820                 ret = intel_modeset_checks(state);
6821                 if (ret)
6822                         goto fail;
6823
6824                 ret = intel_modeset_calc_cdclk(state);
6825                 if (ret)
6826                         return ret;
6827         }
6828
6829         ret = intel_atomic_check_crtcs(state);
6830         if (ret)
6831                 goto fail;
6832
6833         ret = intel_fbc_atomic_check(state);
6834         if (ret)
6835                 goto fail;
6836
6837         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6838                                             new_crtc_state, i) {
6839                 intel_color_assert_luts(new_crtc_state);
6840
6841                 ret = intel_async_flip_check_hw(state, crtc);
6842                 if (ret)
6843                         goto fail;
6844
6845                 /* Either full modeset or fastset (or neither), never both */
6846                 drm_WARN_ON(&dev_priv->drm,
6847                             intel_crtc_needs_modeset(new_crtc_state) &&
6848                             intel_crtc_needs_fastset(new_crtc_state));
6849
6850                 if (!intel_crtc_needs_modeset(new_crtc_state) &&
6851                     !intel_crtc_needs_fastset(new_crtc_state))
6852                         continue;
6853
6854                 intel_crtc_state_dump(new_crtc_state, state,
6855                                       intel_crtc_needs_modeset(new_crtc_state) ?
6856                                       "modeset" : "fastset");
6857         }
6858
6859         return 0;
6860
6861  fail:
6862         if (ret == -EDEADLK)
6863                 return ret;
6864
6865         /*
6866          * FIXME would probably be nice to know which crtc specifically
6867          * caused the failure, in cases where we can pinpoint it.
6868          */
6869         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6870                                             new_crtc_state, i)
6871                 intel_crtc_state_dump(new_crtc_state, state, "failed");
6872
6873         return ret;
6874 }
6875
6876 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6877 {
6878         struct intel_crtc_state *crtc_state;
6879         struct intel_crtc *crtc;
6880         int i, ret;
6881
6882         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6883         if (ret < 0)
6884                 return ret;
6885
6886         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6887                 if (intel_crtc_needs_color_update(crtc_state))
6888                         intel_color_prepare_commit(crtc_state);
6889         }
6890
6891         return 0;
6892 }
6893
6894 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6895                                   struct intel_crtc_state *crtc_state)
6896 {
6897         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6898
6899         if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6900                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6901
6902         if (crtc_state->has_pch_encoder) {
6903                 enum pipe pch_transcoder =
6904                         intel_crtc_pch_transcoder(crtc);
6905
6906                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6907         }
6908 }
6909
/*
 * Program the pieces of pipe state that are allowed to change without a
 * full modeset (fastset): pipe source size, panel fitter, linetime
 * watermark, and the transcoder M1/N1 link values for seamless M/N
 * updates.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	/* Seamless M/N update: reprogram the link M1/N1 values on the fly. */
	if (new_crtc_state->seamless_m_n)
		intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
					       &new_crtc_state->dp_m_n);
}
6953
/*
 * Per-pipe programming that must happen before the planes are armed;
 * called from intel_update_crtc() inside the vblank evasion critical
 * section (between intel_pipe_update_start() and
 * intel_crtc_planes_update_arm()).
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (intel_crtc_needs_color_update(new_crtc_state))
			intel_color_commit_arm(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	/* These run for modesets and fastsets alike. */
	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}
6983
6984 static void commit_pipe_post_planes(struct intel_atomic_state *state,
6985                                     struct intel_crtc *crtc)
6986 {
6987         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6988         const struct intel_crtc_state *new_crtc_state =
6989                 intel_atomic_get_new_crtc_state(state, crtc);
6990
6991         /*
6992          * Disable the scaler(s) after the plane(s) so that we don't
6993          * get a catastrophic underrun even if the two operations
6994          * end up happening in two different frames.
6995          */
6996         if (DISPLAY_VER(dev_priv) >= 9 &&
6997             !intel_crtc_needs_modeset(new_crtc_state))
6998                 skl_detach_scalers(new_crtc_state);
6999 }
7000
7001 static void intel_enable_crtc(struct intel_atomic_state *state,
7002                               struct intel_crtc *crtc)
7003 {
7004         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7005         const struct intel_crtc_state *new_crtc_state =
7006                 intel_atomic_get_new_crtc_state(state, crtc);
7007
7008         if (!intel_crtc_needs_modeset(new_crtc_state))
7009                 return;
7010
7011         intel_crtc_update_active_timings(new_crtc_state);
7012
7013         dev_priv->display.funcs.display->crtc_enable(state, crtc);
7014
7015         if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
7016                 return;
7017
7018         /* vblanks work again, re-enable pipe CRC. */
7019         intel_crtc_enable_pipe_crc(crtc);
7020 }
7021
/*
 * Commit the per-CRTC pieces of the new state: pre-plane updates, the
 * "noarm" register writes done outside the critical section, and the
 * vblank-evaded arming of plane and pipe registers. Used both for
 * fastsets and for the update phase of full modesets.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		/* Preload the LUTs before entering the vblank-evaded section. */
		if (new_crtc_state->preload_luts &&
		    intel_crtc_needs_color_update(new_crtc_state))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (intel_crtc_needs_fastset(new_crtc_state))
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(i915) >= 11 &&
		    intel_crtc_needs_fastset(new_crtc_state))
			icl_set_pipe_chicken(new_crtc_state);
	}

	intel_fbc_update(state, crtc);

	/* "noarm" color programming happens outside the vblank-evaded section. */
	if (!modeset &&
	    intel_crtc_needs_color_update(new_crtc_state))
		intel_color_commit_noarm(new_crtc_state);

	intel_crtc_planes_update_noarm(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	intel_crtc_planes_update_arm(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
7076
/*
 * Tear down one CRTC undergoing a full modeset: pipe CRC, the pipe
 * itself, FBC, and its shared DPLL reference — in that order.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.funcs.display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/*
	 * NOTE(review): only done for pipes that stay disabled in the new
	 * state — presumably programs the "pipe off" watermarks; confirm
	 * against intel_initial_watermarks().
	 */
	if (!new_crtc_state->hw.active)
		intel_initial_watermarks(state, crtc);
}
7098
/*
 * Disable every CRTC that needs a full modeset. Planes go off first for
 * all affected pipes; then the pipes themselves are disabled in two
 * passes, because port sync slaves, MST slaves and bigjoiner slaves
 * must be shut down before their masters. Pipes handled in the first
 * (slaves-only) pass are tracked in the 'handled' mask so the second
 * pass skips them.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Turn off the planes of every active pipe undergoing a modeset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_pre_plane_update(state, crtc);
		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !intel_crtc_is_bigjoiner_slave(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}
7156
7157 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
7158 {
7159         struct intel_crtc_state *new_crtc_state;
7160         struct intel_crtc *crtc;
7161         int i;
7162
7163         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7164                 if (!new_crtc_state->hw.active)
7165                         continue;
7166
7167                 intel_enable_crtc(state, crtc);
7168                 intel_update_crtc(state, crtc);
7169         }
7170 }
7171
/*
 * skl+ modeset enable path.
 *
 * On skl+ each active pipe owns a DDB (data buffer) range, and the
 * ranges of different pipes must never overlap at any instant. This
 * function therefore sequences the per-pipe updates so a pipe is only
 * (re)programmed once its new DDB range no longer overlaps any range
 * still claimed by the other pipes, waiting a vblank where needed for
 * a new allocation to take effect.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Classify the active pipes: fastset/flip-only vs. full modeset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer this pipe until its new range is conflict-free. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    intel_crtc_is_bigjoiner_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been processed by now. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
7293
7294 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
7295 {
7296         struct intel_atomic_state *state, *next;
7297         struct llist_node *freed;
7298
7299         freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
7300         llist_for_each_entry_safe(state, next, freed, freed)
7301                 drm_atomic_state_put(&state->base);
7302 }
7303
7304 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
7305 {
7306         struct drm_i915_private *dev_priv =
7307                 container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
7308
7309         intel_atomic_helper_free_state(dev_priv);
7310 }
7311
/*
 * Sleep until the commit_ready sw fence has signalled, but also wake up
 * and bail out if a display-affecting GPU reset is flagged
 * (I915_RESET_MODESET), so the commit isn't left sleeping behind a
 * reset in progress.
 *
 * Waits on two wait queues simultaneously — the fence's and the reset
 * bit's — by queueing an entry on each before re-checking either
 * condition; whichever fires first wakes us.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		/* Conditions are (re)checked only after both entries are queued. */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
7338
/*
 * Deferred (workqueue) cleanup of a committed atomic state: release
 * per-CRTC color commit resources, clean up the planes, signal
 * cleanup_done, drop the commit's state reference, and drain the
 * deferred free list.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
		intel_color_cleanup_commit(old_crtc_state);

	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	intel_atomic_helper_free_state(i915);
}
7357
/*
 * Read back the fast clear color value the GPU wrote into each plane's
 * clear color (CC) plane and store it in plane_state->ccval for use
 * later during the commit. Planes without an fb or without a CC plane
 * are skipped.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at
		 * offset 0 of cc plane, plane #2 previous generations or
		 * plane #1 for flat ccs):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4x4-byte per-channel values to reach the native color. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
7400
7401 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
7402 {
7403         struct drm_device *dev = state->base.dev;
7404         struct drm_i915_private *dev_priv = to_i915(dev);
7405         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7406         struct intel_crtc *crtc;
7407         struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
7408         intel_wakeref_t wakeref = 0;
7409         int i;
7410
7411         intel_atomic_commit_fence_wait(state);
7412
7413         drm_atomic_helper_wait_for_dependencies(&state->base);
7414         drm_dp_mst_atomic_wait_for_dependencies(&state->base);
7415
7416         if (state->modeset)
7417                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
7418
7419         intel_atomic_prepare_plane_clear_colors(state);
7420
7421         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7422                                             new_crtc_state, i) {
7423                 if (intel_crtc_needs_modeset(new_crtc_state) ||
7424                     intel_crtc_needs_fastset(new_crtc_state))
7425                         intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
7426         }
7427
7428         intel_commit_modeset_disables(state);
7429
7430         /* FIXME: Eventually get rid of our crtc->config pointer */
7431         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7432                 crtc->config = new_crtc_state;
7433
7434         if (state->modeset) {
7435                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
7436
7437                 intel_set_cdclk_pre_plane_update(state);
7438
7439                 intel_modeset_verify_disabled(dev_priv, state);
7440         }
7441
7442         intel_sagv_pre_plane_update(state);
7443
7444         /* Complete the events for pipes that have now been disabled */
7445         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7446                 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7447
7448                 /* Complete events for now disable pipes here. */
7449                 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
7450                         spin_lock_irq(&dev->event_lock);
7451                         drm_crtc_send_vblank_event(&crtc->base,
7452                                                    new_crtc_state->uapi.event);
7453                         spin_unlock_irq(&dev->event_lock);
7454
7455                         new_crtc_state->uapi.event = NULL;
7456                 }
7457         }
7458
7459         intel_encoders_update_prepare(state);
7460
7461         intel_dbuf_pre_plane_update(state);
7462         intel_mbus_dbox_update(state);
7463
7464         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7465                 if (new_crtc_state->do_async_flip)
7466                         intel_crtc_enable_flip_done(state, crtc);
7467         }
7468
7469         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7470         dev_priv->display.funcs.display->commit_modeset_enables(state);
7471
7472         intel_encoders_update_complete(state);
7473
7474         if (state->modeset)
7475                 intel_set_cdclk_post_plane_update(state);
7476
7477         intel_wait_for_vblank_workers(state);
7478
7479         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
7480          * already, but still need the state for the delayed optimization. To
7481          * fix this:
7482          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
7483          * - schedule that vblank worker _before_ calling hw_done
7484          * - at the start of commit_tail, cancel it _synchrously
7485          * - switch over to the vblank wait helper in the core after that since
7486          *   we don't need out special handling any more.
7487          */
7488         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
7489
7490         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7491                 if (new_crtc_state->do_async_flip)
7492                         intel_crtc_disable_flip_done(state, crtc);
7493         }
7494
7495         /*
7496          * Now that the vblank has passed, we can go ahead and program the
7497          * optimal watermarks on platforms that need two-step watermark
7498          * programming.
7499          *
7500          * TODO: Move this (and other cleanup) to an async worker eventually.
7501          */
7502         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7503                                             new_crtc_state, i) {
7504                 /*
7505                  * Gen2 reports pipe underruns whenever all planes are disabled.
7506                  * So re-enable underrun reporting after some planes get enabled.
7507                  *
7508                  * We do this before .optimize_watermarks() so that we have a
7509                  * chance of catching underruns with the intermediate watermarks
7510                  * vs. the new plane configuration.
7511                  */
7512                 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
7513                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
7514
7515                 intel_optimize_watermarks(state, crtc);
7516         }
7517
7518         intel_dbuf_post_plane_update(state);
7519         intel_psr_post_plane_update(state);
7520
7521         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7522                 intel_post_plane_update(state, crtc);
7523
7524                 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
7525
7526                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
7527
7528                 /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
7529                 hsw_ips_post_update(state, crtc);
7530
7531                 /*
7532                  * Activate DRRS after state readout to avoid
7533                  * dp_m_n vs. dp_m2_n2 confusion on BDW+.
7534                  */
7535                 intel_drrs_activate(new_crtc_state);
7536
7537                 /*
7538                  * DSB cleanup is done in cleanup_work aligning with framebuffer
7539                  * cleanup. So copy and reset the dsb structure to sync with
7540                  * commit_done and later do dsb cleanup in cleanup_work.
7541                  *
7542                  * FIXME get rid of this funny new->old swapping
7543                  */
7544                 old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
7545         }
7546
7547         /* Underruns don't always raise interrupts, so check manually */
7548         intel_check_cpu_fifo_underruns(dev_priv);
7549         intel_check_pch_fifo_underruns(dev_priv);
7550
7551         if (state->modeset)
7552                 intel_verify_planes(state);
7553
7554         intel_sagv_post_plane_update(state);
7555
7556         drm_atomic_helper_commit_hw_done(&state->base);
7557
7558         if (state->modeset) {
7559                 /* As one of the primary mmio accessors, KMS has a high
7560                  * likelihood of triggering bugs in unclaimed access. After we
7561                  * finish modesetting, see if an error has been flagged, and if
7562                  * so enable debugging for the next modeset - and hope we catch
7563                  * the culprit.
7564                  */
7565                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
7566                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
7567         }
7568         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7569
7570         /*
7571          * Defer the cleanup of the old state to a separate worker to not
7572          * impede the current task (userspace for blocking modesets) that
7573          * are executed inline. For out-of-line asynchronous modesets/flips,
7574          * deferring to a new worker seems overkill, but we would place a
7575          * schedule point (cond_resched()) here anyway to keep latencies
7576          * down.
7577          */
7578         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
7579         queue_work(system_highpri_wq, &state->base.commit_work);
7580 }
7581
7582 static void intel_atomic_commit_work(struct work_struct *work)
7583 {
7584         struct intel_atomic_state *state =
7585                 container_of(work, struct intel_atomic_state, base.commit_work);
7586
7587         intel_atomic_commit_tail(state);
7588 }
7589
7590 static int
7591 intel_atomic_commit_ready(struct i915_sw_fence *fence,
7592                           enum i915_sw_fence_notify notify)
7593 {
7594         struct intel_atomic_state *state =
7595                 container_of(fence, struct intel_atomic_state, commit_ready);
7596
7597         switch (notify) {
7598         case FENCE_COMPLETE:
7599                 /* we do blocking waits in the worker, nothing to do here */
7600                 break;
7601         case FENCE_FREE:
7602                 {
7603                         struct intel_atomic_helper *helper =
7604                                 &to_i915(state->base.dev)->display.atomic_helper;
7605
7606                         if (llist_add(&state->freed, &helper->free_list))
7607                                 schedule_work(&helper->free_work);
7608                         break;
7609                 }
7610         }
7611
7612         return NOTIFY_DONE;
7613 }
7614
7615 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
7616 {
7617         struct intel_plane_state *old_plane_state, *new_plane_state;
7618         struct intel_plane *plane;
7619         int i;
7620
7621         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7622                                              new_plane_state, i)
7623                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
7624                                         to_intel_frontbuffer(new_plane_state->hw.fb),
7625                                         plane->frontbuffer_bit);
7626 }
7627
/*
 * i915 implementation of drm_mode_config_funcs.atomic_commit.
 *
 * Takes a runtime PM wakeref for the duration of the commit, sets up the
 * commit-ready sw fence, swaps in the new state and then either runs the
 * commit tail inline (blocking) or queues it on a modeset/flip workqueue
 * (nonblocking). Returns 0 on success or a negative errno; on failure all
 * references and the wakeref taken here are released again.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        /* Held across the whole commit; dropped at the end of commit_tail. */
        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        /* Extra state reference owned by the commit_ready fence lifecycle. */
        drm_atomic_state_get(&state->base);
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                drm_dbg_atomic(&dev_priv->drm,
                               "Preparing state failed with %i\n", ret);
                /* Commit the fence so its FENCE_FREE path can reclaim state. */
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);
        if (!ret)
                intel_atomic_swap_global_state(state);

        if (ret) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                i915_sw_fence_commit(&state->commit_ready);

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        intel_color_cleanup_commit(new_crtc_state);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        /* Reference consumed by intel_atomic_commit_tail()'s cleanup work. */
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
        } else {
                /* Blocking modesets wait for earlier queued modesets first. */
                if (state->modeset)
                        flush_workqueue(dev_priv->display.wq.modeset);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
7718
7719 /**
7720  * intel_plane_destroy - destroy a plane
7721  * @plane: plane to destroy
7722  *
7723  * Common destruction function for all types of planes (primary, cursor,
7724  * sprite).
7725  */
7726 void intel_plane_destroy(struct drm_plane *plane)
7727 {
7728         drm_plane_cleanup(plane);
7729         kfree(to_intel_plane(plane));
7730 }
7731
7732 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
7733 {
7734         struct intel_plane *plane;
7735
7736         for_each_intel_plane(&dev_priv->drm, plane) {
7737                 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
7738                                                               plane->pipe);
7739
7740                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
7741         }
7742 }
7743
7744
7745 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
7746                                       struct drm_file *file)
7747 {
7748         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7749         struct drm_crtc *drmmode_crtc;
7750         struct intel_crtc *crtc;
7751
7752         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
7753         if (!drmmode_crtc)
7754                 return -ENOENT;
7755
7756         crtc = to_intel_crtc(drmmode_crtc);
7757         pipe_from_crtc_id->pipe = crtc->pipe;
7758
7759         return 0;
7760 }
7761
7762 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
7763 {
7764         struct drm_device *dev = encoder->base.dev;
7765         struct intel_encoder *source_encoder;
7766         u32 possible_clones = 0;
7767
7768         for_each_intel_encoder(dev, source_encoder) {
7769                 if (encoders_cloneable(encoder, source_encoder))
7770                         possible_clones |= drm_encoder_mask(&source_encoder->base);
7771         }
7772
7773         return possible_clones;
7774 }
7775
7776 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
7777 {
7778         struct drm_device *dev = encoder->base.dev;
7779         struct intel_crtc *crtc;
7780         u32 possible_crtcs = 0;
7781
7782         for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
7783                 possible_crtcs |= drm_crtc_mask(&crtc->base);
7784
7785         return possible_crtcs;
7786 }
7787
7788 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
7789 {
7790         if (!IS_MOBILE(dev_priv))
7791                 return false;
7792
7793         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
7794                 return false;
7795
7796         if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
7797                 return false;
7798
7799         return true;
7800 }
7801
/*
 * Whether an analog CRT output exists behind DDI E on HSW/BDW.
 * Checks platform support, fuse straps and the VBT in turn.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
        /* No DDI-based CRT on SKL and later. */
        if (DISPLAY_VER(dev_priv) >= 9)
                return false;

        /* ULT parts don't have the analog output. */
        if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
                return false;

        /* LPT-H fuse strap can disable CRT. */
        if (HAS_PCH_LPT_H(dev_priv) &&
            intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
                return false;

        /* DDI E can't be used if DDI A requires 4 lanes */
        if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
                return false;

        /* VBT must declare integrated CRT support. */
        if (!dev_priv->display.vbt.int_crt_support)
                return false;

        return true;
}
7823
/*
 * Probe and register every display output encoder present on this
 * platform (DDI, DP, HDMI, SDVO, LVDS, CRT, DSI, TV, DVO), dispatching
 * on platform/display generation. On older platforms presence is probed
 * via strap bits in detect registers and/or the VBT. Finally fills in
 * each registered encoder's possible_crtcs/possible_clones masks.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_unlock_regs_wa(dev_priv);

        if (!HAS_DISPLAY(dev_priv))
                return;

        /* DDI-based platforms: ports are known per platform, no probing. */
        if (IS_DG2(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D_XELPD);
                intel_ddi_init(dev_priv, PORT_TC1);
        } else if (IS_ALDERLAKE_P(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_TC1);
                intel_ddi_init(dev_priv, PORT_TC2);
                intel_ddi_init(dev_priv, PORT_TC3);
                intel_ddi_init(dev_priv, PORT_TC4);
                icl_dsi_init(dev_priv);
        } else if (IS_ALDERLAKE_S(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_TC1);
                intel_ddi_init(dev_priv, PORT_TC2);
                intel_ddi_init(dev_priv, PORT_TC3);
                intel_ddi_init(dev_priv, PORT_TC4);
        } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_TC1);
                intel_ddi_init(dev_priv, PORT_TC2);
        } else if (DISPLAY_VER(dev_priv) >= 12) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_TC1);
                intel_ddi_init(dev_priv, PORT_TC2);
                intel_ddi_init(dev_priv, PORT_TC3);
                intel_ddi_init(dev_priv, PORT_TC4);
                intel_ddi_init(dev_priv, PORT_TC5);
                intel_ddi_init(dev_priv, PORT_TC6);
                icl_dsi_init(dev_priv);
        } else if (IS_JSL_EHL(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                icl_dsi_init(dev_priv);
        } else if (DISPLAY_VER(dev_priv) == 11) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
                icl_dsi_init(dev_priv);
        } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                vlv_dsi_init(dev_priv);
        } else if (DISPLAY_VER(dev_priv) >= 9) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
        } else if (HAS_DDI(dev_priv)) {
                u32 found;

                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);

                /* Haswell uses DDI functions to detect digital outputs. */
                found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                if (found)
                        intel_ddi_init(dev_priv, PORT_A);

                /* Remaining DDIs are reported via SFUSE_STRAP bits. */
                found = intel_de_read(dev_priv, SFUSE_STRAP);
                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;

                /*
                 * intel_edp_init_connector() depends on this completing first,
                 * to prevent the registration of both eDP and LVDS and the
                 * incorrect sharing of the PPS.
                 */
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);

                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (ilk_has_edp_a(dev_priv))
                        g4x_dp_init(dev_priv, DP_A, PORT_A);

                if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
                                g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
                        g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                /* Port D HDMI is skipped when D is eDP (handled above). */
                if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
                        g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
                        g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
                        g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
                        intel_crt_init(dev_priv);

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
                                g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
                                g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (IS_PINEVIEW(dev_priv)) {
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);
        } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
                bool found = false;

                if (IS_MOBILE(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);

                if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
                        drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "probing HDMI on SDVOB\n");
                                g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                g4x_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
                        drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "probing HDMI on SDVOC\n");
                                g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                g4x_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
                        g4x_dp_init(dev_priv, DP_D, PORT_D);

                if (SUPPORTS_TV(dev_priv))
                        intel_tv_init(dev_priv);
        } else if (DISPLAY_VER(dev_priv) == 2) {
                if (IS_I85X(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);
                intel_dvo_init(dev_priv);
        }

        /* Compute crtc/clone masks now that all encoders are registered. */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs =
                        intel_encoder_possible_crtcs(encoder);
                encoder->base.possible_clones =
                        intel_encoder_possible_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        /* Keep panel (internal) connectors first in the connector list. */
        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
8064
8065 static int max_dotclock(struct drm_i915_private *i915)
8066 {
8067         int max_dotclock = i915->max_dotclk_freq;
8068
8069         /* icl+ might use bigjoiner */
8070         if (DISPLAY_VER(i915) >= 11)
8071                 max_dotclock *= 2;
8072
8073         return max_dotclock;
8074 }
8075
/*
 * drm_mode_config_funcs.mode_valid: reject modes no i915 transcoder
 * can generate. Checks unsupported flags first, then the dotclock,
 * then per-generation h/v timing limits and minimum porch widths.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
                 const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int hdisplay_max, htotal_max;
        int vdisplay_max, vtotal_max;

        /*
         * Can't reject DBLSCAN here because Xorg ddxen can add piles
         * of DBLSCAN modes to the output's mode list when they detect
         * the scaling mode property on the connector. And they don't
         * ask the kernel to validate those modes in any way until
         * modeset time at which point the client gets a protocol error.
         * So in order to not upset those clients we silently ignore the
         * DBLSCAN flag on such connectors. For other connectors we will
         * reject modes with the DBLSCAN flag in encoder->compute_config().
         * And we always reject DBLSCAN modes in connector->mode_valid()
         * as we never want such modes on the connector's mode list.
         */

        if (mode->vscan > 1)
                return MODE_NO_VSCAN;

        if (mode->flags & DRM_MODE_FLAG_HSKEW)
                return MODE_H_ILLEGAL;

        if (mode->flags & (DRM_MODE_FLAG_CSYNC |
                           DRM_MODE_FLAG_NCSYNC |
                           DRM_MODE_FLAG_PCSYNC))
                return MODE_HSYNC;

        if (mode->flags & (DRM_MODE_FLAG_BCAST |
                           DRM_MODE_FLAG_PIXMUX |
                           DRM_MODE_FLAG_CLKDIV2))
                return MODE_BAD;

        /*
         * Reject clearly excessive dotclocks early to
         * avoid having to worry about huge integers later.
         */
        if (mode->clock > max_dotclock(dev_priv))
                return MODE_CLOCK_HIGH;

        /* Transcoder timing limits, per display generation. */
        if (DISPLAY_VER(dev_priv) >= 11) {
                hdisplay_max = 16384;
                vdisplay_max = 8192;
                htotal_max = 16384;
                vtotal_max = 8192;
        } else if (DISPLAY_VER(dev_priv) >= 9 ||
                   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
                vdisplay_max = 4096;
                htotal_max = 8192;
                vtotal_max = 8192;
        } else if (DISPLAY_VER(dev_priv) >= 3) {
                hdisplay_max = 4096;
                vdisplay_max = 4096;
                htotal_max = 8192;
                vtotal_max = 8192;
        } else {
                hdisplay_max = 2048;
                vdisplay_max = 2048;
                htotal_max = 4096;
                vtotal_max = 4096;
        }

        if (mode->hdisplay > hdisplay_max ||
            mode->hsync_start > htotal_max ||
            mode->hsync_end > htotal_max ||
            mode->htotal > htotal_max)
                return MODE_H_ILLEGAL;

        if (mode->vdisplay > vdisplay_max ||
            mode->vsync_start > vtotal_max ||
            mode->vsync_end > vtotal_max ||
            mode->vtotal > vtotal_max)
                return MODE_V_ILLEGAL;

        /* Minimum active width / blanking period requirements. */
        if (DISPLAY_VER(dev_priv) >= 5) {
                if (mode->hdisplay < 64 ||
                    mode->htotal - mode->hdisplay < 32)
                        return MODE_H_ILLEGAL;

                if (mode->vtotal - mode->vdisplay < 5)
                        return MODE_V_ILLEGAL;
        } else {
                if (mode->htotal - mode->hdisplay < 32)
                        return MODE_H_ILLEGAL;

                if (mode->vtotal - mode->vdisplay < 3)
                        return MODE_V_ILLEGAL;
        }

        /*
         * Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
        if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
            mode->hsync_start == mode->hdisplay)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}
8181
8182 enum drm_mode_status
8183 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
8184                                 const struct drm_display_mode *mode,
8185                                 bool bigjoiner)
8186 {
8187         int plane_width_max, plane_height_max;
8188
8189         /*
8190          * intel_mode_valid() should be
8191          * sufficient on older platforms.
8192          */
8193         if (DISPLAY_VER(dev_priv) < 9)
8194                 return MODE_OK;
8195
8196         /*
8197          * Most people will probably want a fullscreen
8198          * plane so let's not advertize modes that are
8199          * too big for that.
8200          */
8201         if (DISPLAY_VER(dev_priv) >= 11) {
8202                 plane_width_max = 5120 << bigjoiner;
8203                 plane_height_max = 4320;
8204         } else {
8205                 plane_width_max = 5120;
8206                 plane_height_max = 4096;
8207         }
8208
8209         if (mode->hdisplay > plane_width_max)
8210                 return MODE_H_ILLEGAL;
8211
8212         if (mode->vdisplay > plane_height_max)
8213                 return MODE_V_ILLEGAL;
8214
8215         return MODE_OK;
8216 }
8217
/* i915 implementation of the DRM mode config entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .get_format_info = intel_fb_get_format_info,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .mode_valid = intel_mode_valid,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
8229
/* Display hooks for skl+ (display version >= 9). */
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};
8237
/* Display hooks for pre-skl DDI platforms (hsw/bdw). */
static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8245
/* Display hooks for PCH-split platforms without DDI (ilk/snb/ivb). */
static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8253
/* Display hooks for Valleyview/Cherryview. */
static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8261
/* Display hooks for everything older (gmch platforms). */
static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
8269
8270 /**
8271  * intel_init_display_hooks - initialize the display modesetting hooks
8272  * @dev_priv: device private
8273  */
8274 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
8275 {
8276         if (!HAS_DISPLAY(dev_priv))
8277                 return;
8278
8279         intel_color_init_hooks(dev_priv);
8280         intel_init_cdclk_hooks(dev_priv);
8281         intel_audio_hooks_init(dev_priv);
8282
8283         intel_dpll_init_clock_hook(dev_priv);
8284
8285         if (DISPLAY_VER(dev_priv) >= 9) {
8286                 dev_priv->display.funcs.display = &skl_display_funcs;
8287         } else if (HAS_DDI(dev_priv)) {
8288                 dev_priv->display.funcs.display = &ddi_display_funcs;
8289         } else if (HAS_PCH_SPLIT(dev_priv)) {
8290                 dev_priv->display.funcs.display = &pch_split_display_funcs;
8291         } else if (IS_CHERRYVIEW(dev_priv) ||
8292                    IS_VALLEYVIEW(dev_priv)) {
8293                 dev_priv->display.funcs.display = &vlv_display_funcs;
8294         } else {
8295                 dev_priv->display.funcs.display = &i9xx_display_funcs;
8296         }
8297
8298         intel_fdi_init_hook(dev_priv);
8299 }
8300
/*
 * Read out the current cdclk configuration from hardware and seed the
 * software cdclk state with it, so subsequent atomic commits start from
 * what the hardware is actually running.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(i915))
		return;

	cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
	/* Both logical and actual state start out as the HW readout. */
	cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
8314
/*
 * Perform one atomic commit of the state read out from hardware at init,
 * so all active planes get fully computed software state before the first
 * real modeset from userspace.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Pull in the connectors of any encoder whose
			 * fastset check says the readout state can't be
			 * kept as-is, forcing a modeset on them.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Lock contention: clear the partial state and back off, then retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
8393
/* Mode-config helper hooks; MST needs its own commit setup step. */
static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
8397
8398 static void intel_mode_config_init(struct drm_i915_private *i915)
8399 {
8400         struct drm_mode_config *mode_config = &i915->drm.mode_config;
8401
8402         drm_mode_config_init(&i915->drm);
8403         INIT_LIST_HEAD(&i915->display.global.obj_list);
8404
8405         mode_config->min_width = 0;
8406         mode_config->min_height = 0;
8407
8408         mode_config->preferred_depth = 24;
8409         mode_config->prefer_shadow = 1;
8410
8411         mode_config->funcs = &intel_mode_funcs;
8412         mode_config->helper_private = &intel_mode_config_funcs;
8413
8414         mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
8415
8416         /*
8417          * Maximum framebuffer dimensions, chosen to match
8418          * the maximum render engine surface size on gen4+.
8419          */
8420         if (DISPLAY_VER(i915) >= 7) {
8421                 mode_config->max_width = 16384;
8422                 mode_config->max_height = 16384;
8423         } else if (DISPLAY_VER(i915) >= 4) {
8424                 mode_config->max_width = 8192;
8425                 mode_config->max_height = 8192;
8426         } else if (DISPLAY_VER(i915) == 3) {
8427                 mode_config->max_width = 4096;
8428                 mode_config->max_height = 4096;
8429         } else {
8430                 mode_config->max_width = 2048;
8431                 mode_config->max_height = 2048;
8432         }
8433
8434         if (IS_I845G(i915) || IS_I865G(i915)) {
8435                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
8436                 mode_config->cursor_height = 1023;
8437         } else if (IS_I830(i915) || IS_I85X(i915) ||
8438                    IS_I915G(i915) || IS_I915GM(i915)) {
8439                 mode_config->cursor_width = 64;
8440                 mode_config->cursor_height = 64;
8441         } else {
8442                 mode_config->cursor_width = 256;
8443                 mode_config->cursor_height = 256;
8444         }
8445 }
8446
/* Undo intel_mode_config_init(): global objects first, then drm core. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
8452
/*
 * part #1: call before irq install
 *
 * Early display init that must not depend on interrupts: vblank, VBT,
 * VGA registration, power domains, DMC, workqueues, mode config and the
 * cdclk/color/dbuf/bw global state objects.
 *
 * Returns 0 on success; on error unwinds via the goto-cleanup chain.
 */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	ret = intel_power_domains_init(i915);
	if (ret < 0)
		goto cleanup_vga;

	intel_power_domains_init_hw(i915, false);

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_init(i915);

	/*
	 * NOTE(review): these allocations are not checked for failure;
	 * a NULL workqueue pointer here would oops later — confirm
	 * whether error handling is intentionally omitted.
	 */
	i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
	i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
						WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_color_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->display.atomic_helper.free_list);
	INIT_WORK(&i915->display.atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_fini(i915);
	intel_power_domains_driver_remove(i915);
cleanup_vga:
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
8528
/*
 * part #2: call after irq install, but before gem init
 *
 * Creates the CRTCs and PLLs, reads out the hardware state left by the
 * BIOS/GOP and reconstructs the initial plane configs from it.
 *
 * Returns 0 on success or a negative errno.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_wm_init(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(i915);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->display.cdclk.max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	intel_hti_init(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* State readout needs all modeset locks held. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	/* Reconstruct the BIOS framebuffer for each active pipe. */
	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		ilk_wm_sanitize(i915);

	return 0;
}
8600
/*
 * part #3: call after gem init
 *
 * Final display init: initial commit, overlay, fbdev, hotplug and IPC.
 *
 * Returns 0 on success or a negative errno (initial commit failure is
 * only logged, not fatal).
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	skl_watermark_ipc_init(i915);

	return 0;
}
8633
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 timing (quirk:
 * i830 needs both pipes/plls running for correct operation). Programs
 * the transcoder timings and DPLL by hand, then waits for the scanline
 * to start moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* On this platform the transcoder is 1:1 with the pipe. */
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check the divider choice against the 48 MHz refclk. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	/* Fixed 640x480 timings (register values are size - 1). */
	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
8713
/*
 * Disable a pipe that was force-enabled by the i830 quirk: verify no
 * planes/cursors are still active on it, shut the transcoder down, wait
 * for the scanline to stop, then disable the DPLL (keeping VGA mode
 * disabled).
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* The quirk pipe must not have any planes or cursors enabled. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
8740
/*
 * Restore the atomic state saved at suspend time. Takes all modeset
 * locks (retrying on -EDEADLK) before committing the saved state, and
 * drops the saved state reference when done.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_atomic_state *state = i915->display.restore.modeset_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	/* Consume the saved state; only storing &ctx here, init follows. */
	i915->display.restore.modeset_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Grab all modeset locks, backing off on contention. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(i915, state, &ctx);

	skl_watermark_ipc_update(i915);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&i915->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
8778
/* Cancel all outstanding per-connector work queued by hotplug handling. */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
8796
/*
 * part #1: call before irq uninstall
 *
 * Drain the display workqueues and the atomic free-state helper, then
 * suspend MST before interrupts go away.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	flush_workqueue(i915->display.wq.flip);
	flush_workqueue(i915->display.wq.modeset);

	flush_work(&i915->display.atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);
}
8816
/*
 * part #2: call after irq uninstall
 *
 * Tear down polling, fbdev, HDCP, mode config, overlay, gmbus and the
 * display workqueues. The ordering comments below are load-bearing.
 */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->display.wq.flip);
	destroy_workqueue(i915->display.wq.modeset);

	intel_fbc_cleanup(i915);
}
8850
/*
 * part #3: call after gem init
 *
 * Final teardown, mirroring intel_modeset_init_noirq()'s error path:
 * DMC, power domains, VGA client and VBT.
 */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
8862
8863 bool intel_modeset_probe_defer(struct pci_dev *pdev)
8864 {
8865         struct drm_privacy_screen *privacy_screen;
8866
8867         /*
8868          * apple-gmux is needed on dual GPU MacBook Pro
8869          * to probe the panel if we're the inactive GPU.
8870          */
8871         if (vga_switcheroo_client_probe_defer(pdev))
8872                 return true;
8873
8874         /* If the LCD panel has a privacy-screen, wait for it */
8875         privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
8876         if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
8877                 return true;
8878
8879         drm_privacy_screen_put(privacy_screen);
8880
8881         return false;
8882 }
8883
/*
 * Register the userspace-visible display pieces: debugfs, opregion/ACPI,
 * audio, the async fbdev initial config and connector polling. Ordering
 * constraints are documented inline.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	intel_acpi_video_register(i915);

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(i915);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
8914
/*
 * Unregister the userspace-visible display pieces, roughly reversing
 * intel_display_driver_register(): fbdev first, then audio, polling,
 * atomic shutdown and finally ACPI/opregion.
 */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}
8934
8935 bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
8936 {
8937         return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
8938 }